code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil __a = 1_00 __a = set(range(3, NUM_PRIMES, 2)) primes.add(2) __a = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowercase ): '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} snake_case_ :set[int] = set() snake_case_ :int snake_case_ :int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowercase = 5000 ): '''simple docstring''' for number_to_partition in range(1, _lowercase ): if len(partition(_lowercase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"""{solution() = }""")
66
"""simple docstring""" from math import factorial class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple: snake_case_ :List[Any] = real if isinstance(snake_case , snake_case ): snake_case_ :Tuple = [1] * rank else: snake_case_ :Optional[Any] = rank def __repr__( self: List[str] ) -> Tuple: return ( f"""{self.real}+""" f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: snake_case_ :Any = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case ) def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]: if not isinstance(snake_case , snake_case ): return Dual(self.real + other , self.duals ) snake_case_ :List[Any] = self.duals.copy() snake_case_ :Tuple = other.duals.copy() if len(snake_case ) > len(snake_case ): o_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) elif len(snake_case ) < len(snake_case ): s_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) snake_case_ :Dict = [] for i in range(len(snake_case ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case ) _A : str = __add__ def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple: return self + other * -1 def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]: if not isinstance(snake_case , snake_case ): snake_case_ :Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case ) snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case ) _A : int = 
__mul__ def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case ) raise ValueError def __floordiv__( self: int , snake_case: List[Any] ) -> Any: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[int] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case ) raise ValueError def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]: if n < 0 or isinstance(snake_case , snake_case ): raise ValueError("""power must be a positive integer""" ) if n == 0: return 1 if n == 1: return self snake_case_ :str = self for _ in range(n - 1 ): x *= self return x def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if not callable(_lowercase ): raise ValueError("""differentiate() requires a function as input for func""" ) if not isinstance(_lowercase, (float, int) ): raise ValueError("""differentiate() requires a float as input for position""" ) if not isinstance(_lowercase, _lowercase ): raise ValueError("""differentiate() requires an int as input for order""" ) snake_case_ :Optional[Any] = Dual(_lowercase, 1 ) snake_case_ :List[Any] = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def A_ ( _lowercase ): '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
66
1
"""simple docstring""" __a = "Tobias Carryer" from time import time class lowerCamelCase : '''simple docstring''' def __init__( self: int , snake_case: Any , snake_case: Any , snake_case: Optional[Any] , snake_case: List[Any]=int(time() ) ) -> Optional[Any]: # noqa: B008 snake_case_ :Any = multiplier snake_case_ :Union[str, Any] = increment snake_case_ :Union[str, Any] = modulo snake_case_ :Any = seed def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_ :Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. __a = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
66
"""simple docstring""" from __future__ import annotations __a = 10 def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = 1 snake_case_ :List[str] = max(_lowercase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ :list[list] = [[] for _ in range(_lowercase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ :Any = int((i / placement) % RADIX ) buckets[tmp].append(_lowercase ) # put each buckets' contents into list_of_ints snake_case_ :Optional[Any] = 0 for b in range(_lowercase ): for i in buckets[b]: snake_case_ :Union[str, Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = len(_lowercase ) print("""The following activities are selected:""" ) # The first activity is always selected snake_case_ :Union[str, Any] = 0 print(_lowercase, end=""",""" ) # Consider rest of the activities for j in range(_lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(_lowercase, end=""",""" ) snake_case_ :Dict = j if __name__ == "__main__": import doctest doctest.testmod() __a = [1, 3, 0, 5, 8, 5] __a = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :Union[str, Any] = controlnet_params snake_case_ :Union[str, Any] = """bird""" snake_case_ :List[Any] = jax.device_count() snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case_ :Any = jax.random.PRNGKey(0 ) snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() ) snake_case_ :List[Any] = replicate(snake_case ) snake_case_ :List[str] = shard(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :Dict = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1] snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Dict = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :str = controlnet_params snake_case_ :Optional[int] = """Chef in the kitchen""" snake_case_ :Union[str, Any] = jax.device_count() snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case_ :str = jax.random.PRNGKey(0 ) snake_case_ :str = jax.random.split(snake_case , jax.device_count() ) snake_case_ :Tuple = replicate(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :int = shard(snake_case ) snake_case_ :List[str] = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ :int = images[0, 253:256, 253:256, -1] snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Optional[int] = jnp.array( [[0.2_7_1_4_8_4, 
0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
66
1
"""simple docstring""" def A_ ( _lowercase = 3, _lowercase = 7, _lowercase = 1000000 ): '''simple docstring''' snake_case_ :List[Any] = 0 snake_case_ :Any = 1 for current_denominator in range(1, limit + 1 ): snake_case_ :int = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: snake_case_ :List[str] = current_numerator snake_case_ :Any = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_00_00_00))
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :Optional[int] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ :List[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" ) snake_case_ :Dict = """The dog is cute and lives in the garden house""" snake_case_ :Dict = jnp.array([tokenizer.encode(snake_case )] ) snake_case_ :Optional[Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim snake_case_ :Optional[Any] = jnp.array( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) snake_case_ :List[Any] = model(snake_case )["""last_hidden_state"""] self.assertEqual(output.shape , snake_case ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case , atol=1E-3 ) )
66
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" ) snake_case_ :Any = json.loads(open(_lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(""".pt""" ): snake_case_ :Optional[int] = args.output + """.pt""" snake_case_ :List[str] = OrderedDict() with tf.device("""/CPU:0""" ): snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir ) snake_case_ :str = reader.get_variable_to_shape_map() for key_name in shapes.keys(): snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): snake_case_ :Any = int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): snake_case_ :Optional[int] = 8 snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :List[str] = torch.tensor(_lowercase ) elif key_name.startswith("""model/moe""" ): snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/softmlp/kernel""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player snake_case_ :Optional[int] = 
vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): snake_case_ :Dict = key_name[-9:-7] for i in range(16 ): snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) snake_case_ :Tuple = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/mlp""" ): snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p1/bias""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player snake_case_ :str = vnp.copy() # same because it is one dimensional snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/bias""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player snake_case_ :Any = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/ln""" ): snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :int = 
torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.startswith("""model/att""" ): snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum snake_case_ :Dict = state[:, 0, :, :] snake_case_ :int = state[:, 1, :, :] snake_case_ :List[str] = state[:, 2, :, :] snake_case_ :str = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[int] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player snake_case_ :int = torch.tensor(_lowercase ) snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player snake_case_ :Dict = torch.tensor(_lowercase ) snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/o/kernel""" ): snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player snake_case_ :str = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = torch.tensor(_lowercase ) elif key_name.startswith("""model/an""" ): snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): 
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) if key_name.startswith("""model/wte""" ): snake_case_ :Tuple = """lm_head.weight""" snake_case_ :List[str] = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) elif key_name.startswith("""model/wob""" ): snake_case_ :str = """final_logits_bias""" snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = state.reshape((1, -1) ) snake_case_ :Union[str, Any] = torch.tensor(_lowercase ) elif key_name == "model/dense/kernel": snake_case_ :str = """model.last_project.weight""" snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = torch.tensor(_lowercase ) elif key_name == "model/dense_1/bias": snake_case_ :Optional[int] = """model.last_project.bias""" snake_case_ :Tuple = vnp.copy() # same because it is one dimensional snake_case_ :Any = torch.tensor(_lowercase ) torch.save(_lowercase, args.output ) if __name__ == "__main__": __a = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, 
required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") __a = parser.parse_args() convert_tf_gptsan_to_pt(args)
66
1
"""simple docstring""" import math def A_ ( _lowercase, _lowercase ): '''simple docstring''' if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_lowercase ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("""This should never happen""" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. __a = "Enter the base and the power separated by a comma: " __a , __a = map(int, input(prompt).split(",")) __a , __a = map(int, input(prompt).split(",")) # We find the log of each number, using the function res(), which takes two # arguments. __a = res(xa, ya) __a = res(xa, ya) # We check for the largest number if resa > resa: print("Largest number is", xa, "^", ya) elif resa > resa: print("Largest number is", xa, "^", ya) else: print("Both are equal")
66
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __a = pd.read_csv("sample_data.csv", header=None) __a = df.shape[:1][0] # If you're using some other dataset input the target column __a = df.iloc[:, 1:2] __a = actual_data.values.reshape(len_data, 1) __a = MinMaxScaler().fit_transform(actual_data) __a = 10 __a = 5 __a = 20 __a = len_data - periods * look_back __a = actual_data[:division] __a = actual_data[division - look_back :] __a , __a = [], [] __a , __a = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __a = np.array(train_x) __a = np.array(test_x) __a = np.array([list(i.ravel()) for i in train_y]) __a = np.array([list(i.ravel()) for i in test_y]) __a = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") __a = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) __a = model.predict(x_test)
66
1
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: List[Any] , snake_case: Optional[int] , snake_case: Optional[int]=13 , snake_case: str=30 , snake_case: Dict=2 , snake_case: Tuple=3 , snake_case: Optional[Any]=True , snake_case: Optional[Any]=True , snake_case: str=32 , snake_case: List[str]=2 , snake_case: Union[str, Any]=4 , snake_case: Union[str, Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Optional[int]=0.1 , snake_case: str=0.1 , snake_case: Dict=10 , snake_case: Union[str, Any]=0.0_2 , snake_case: Union[str, Any]=3 , snake_case: int=0.6 , snake_case: List[Any]=None , ) -> List[str]: snake_case_ :Optional[int] = parent snake_case_ :Dict = batch_size snake_case_ :Union[str, Any] = image_size snake_case_ :Tuple = patch_size snake_case_ :Union[str, Any] = num_channels snake_case_ :Optional[Any] = is_training snake_case_ :Optional[Any] = use_labels snake_case_ :List[str] = hidden_size snake_case_ :Tuple = num_hidden_layers snake_case_ :Optional[int] = num_attention_heads snake_case_ :Optional[int] = intermediate_size snake_case_ :str = hidden_act snake_case_ :Dict = hidden_dropout_prob snake_case_ :Union[str, Any] = attention_probs_dropout_prob 
snake_case_ :Any = type_sequence_label_size snake_case_ :Any = initializer_range snake_case_ :Any = mask_ratio snake_case_ :int = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ :Optional[Any] = (image_size // patch_size) ** 2 snake_case_ :Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Tuple = None if self.use_labels: snake_case_ :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Tuple: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] , snake_case: List[str] , snake_case: List[str] ) -> Optional[Any]: snake_case_ :str = TFViTMAEModel(config=snake_case ) snake_case_ :Any = model(snake_case , training=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Union[str, Any] , snake_case: 
Optional[int] , snake_case: Optional[int] ) -> List[Any]: snake_case_ :str = TFViTMAEForPreTraining(snake_case ) snake_case_ :Union[str, Any] = model(snake_case , training=snake_case ) # expected sequence length = num_patches snake_case_ :Union[str, Any] = (self.image_size // self.patch_size) ** 2 snake_case_ :Any = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :List[str] = TFViTMAEForPreTraining(snake_case ) snake_case_ :int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :Union[str, Any] = model(snake_case , training=snake_case ) snake_case_ :str = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple: snake_case_ :Union[str, Any] = self.prepare_config_and_inputs() ((snake_case_), (snake_case_), (snake_case_)) :List[str] = config_and_inputs snake_case_ :List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _A : Optional[int] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} _A : int = False _A : Any = False _A : List[str] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: str ) -> Tuple: snake_case_ :str = TFViTMAEModelTester(self ) snake_case_ :str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCAmelCase_ ( self: Any ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: Optional[int] ) -> str: pass def lowerCAmelCase_ ( self: List[Any] ) -> int: snake_case_, 
snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Layer ) ) def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Dict = model_class(snake_case ) snake_case_ :Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :List[Any] = [*signature.parameters.keys()] snake_case_ :Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: snake_case_ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case ) def lowerCAmelCase_ ( self: int ) -> Any: # make the mask reproducible np.random.seed(2 ) snake_case_, snake_case_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ :List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ :Optional[Any] = model_class(snake_case ) snake_case_ :Optional[int] = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Union[str, Any] = model(snake_case , noise=snake_case ) snake_case_ :Union[str, Any] = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = model(**snake_case , noise=snake_case ) 
snake_case_ :Any = outputs_dict[0].numpy() snake_case_ :Tuple = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: # make the mask reproducible np.random.seed(2 ) snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :str = int((config.image_size // config.patch_size) ** 2 ) snake_case_ :Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(snake_case: str ): snake_case_ :List[Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(snake_case ): snake_case_ :Tuple = v.numpy() else: snake_case_ :Optional[Any] = np.array(snake_case ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = prepare_numpy_arrays(snake_case ) snake_case_ :Any = model(snake_case , noise=snake_case ) snake_case_ :List[str] = model(**snake_case , noise=snake_case ) self.assert_outputs_same(snake_case , snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Optional[int] , snake_case: Optional[Any] , snake_case: Any ) -> Union[str, Any]: # make masks reproducible np.random.seed(2 ) snake_case_ :List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ :Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ :Optional[int] = tf.constant(snake_case ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ :Tuple = tf_noise super().check_pt_tf_models(snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: str ) -> List[str]: # make mask reproducible np.random.seed(2 ) snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(snake_case ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(snake_case , snake_case ),) if isinstance(snake_case , snake_case ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(snake_case , """_keras_serializable""" , snake_case ) } snake_case_ :str = int((config.image_size // config.patch_size) ** 2 ) snake_case_ :Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ :int = tf.convert_to_tensor(snake_case ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ :List[str] = main_layer_class(snake_case ) snake_case_ :List[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ :int = tf.keras.Model(snake_case , outputs=main_layer(snake_case ) ) snake_case_ :int = model(snake_case ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ :List[Any] = os.path.join(snake_case , """keras_model.h5""" ) model.save(snake_case ) snake_case_ :List[str] = tf.keras.models.load_model( snake_case , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(snake_case , tf.keras.Model ) snake_case_ :int = model(snake_case ) 
self.assert_outputs_same(snake_case , snake_case ) @slow def lowerCAmelCase_ ( self: Tuple ) -> Tuple: # make mask reproducible np.random.seed(2 ) snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Tuple = model(snake_case , noise=snake_case ) if model_class.__name__ == "TFViTMAEModel": snake_case_ :Any = outputs.last_hidden_state.numpy() snake_case_ :Dict = 0 else: snake_case_ :int = outputs.logits.numpy() snake_case_ :str = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case , saved_model=snake_case ) snake_case_ :int = model_class.from_pretrained(snake_case ) snake_case_ :Dict = model(snake_case , noise=snake_case ) if model_class.__name__ == "TFViTMAEModel": snake_case_ :Optional[Any] = after_outputs["""last_hidden_state"""].numpy() snake_case_ :Dict = 0 else: snake_case_ :Dict = after_outputs["""logits"""].numpy() snake_case_ :Union[str, Any] = 0 snake_case_ :Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case , 1E-5 ) def lowerCAmelCase_ ( self: List[str] ) -> List[str]: # make mask reproducible np.random.seed(2 ) snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :str = int((config.image_size // config.patch_size) ** 2 ) snake_case_ :Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ :Dict = model_class(snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Union[str, Any] = model(snake_case , noise=snake_case ) snake_case_ :Optional[int] = 
model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(snake_case ) snake_case_ :Optional[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ :Optional[int] = model_class.from_config(model.config ) snake_case_ :Any = new_model(snake_case ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ :Union[str, Any] = new_model(snake_case , noise=snake_case ) self.assert_outputs_same(snake_case , snake_case ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCAmelCase_ ( self: Tuple ) -> str: pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: pass @slow def lowerCAmelCase_ ( self: int ) -> Union[str, Any]: snake_case_ :Optional[Any] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(snake_case ) def A_ ( ): '''simple docstring''' snake_case_ :Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: List[str] ) -> List[str]: return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: # make random mask reproducible across the PT and TF model np.random.seed(2 ) snake_case_ :Any = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) snake_case_ :str = self.default_image_processor snake_case_ :Optional[int] = prepare_img() snake_case_ :int = image_processor(images=snake_case , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can 
ensure that the PT and TF models operate on the same inputs) snake_case_ :Optional[Any] = ViTMAEConfig() snake_case_ :List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ :Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ :Tuple = model(**snake_case , noise=snake_case ) # verify the logits snake_case_ :int = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :Union[str, Any] = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case , atol=1E-4 )
66
"""AltCLIP package init.

Exposes the configuration, processing and (when torch is available) modeling
classes lazily via ``_LazyModule`` so that ``import transformers`` stays cheap.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public names it provides; consumed by _LazyModule.
# The previous revision bound this dict (and the modeling list below) to a
# throwaway name while still passing an undefined ``_import_structure`` to
# _LazyModule, which raised NameError at import time.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is importable only when torch is installed, so it is
    # appended to the lazy-import map instead of being listed unconditionally.
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime uses the lazy proxy.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module object with the lazy proxy (the proxy must be
    # installed in sys.modules, not merely assigned to a local name).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Fast (Rust-backed) tokenizer for PEGASUS.

Mirrors the slow sentencepiece ``PegasusTokenizer``: the first ``offset``
(default 103) ids are reserved for special tokens (<pad>, </s>, <mask_1>,
<mask_2>/<mask> and the <unk_2>..<unk_102> placeholders) ahead of the
sentencepiece vocabulary.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        """Build the tokenizer, filling the reserved <unk_x> special-token slots.

        Raises:
            TypeError: if ``additional_special_tokens`` is not a list.
            ValueError: if the supplied additional tokens collide with the
                generated ``<unk_x>`` placeholders.
        """
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all
            # additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over ``seq`` marking special-token ids with 1."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS to the sequence (pairs are simply concatenated first)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
66
"""Convert original X-CLIP checkpoints into the Hugging Face ``XCLIPModel``
format, verify the predictions on a sample video, and optionally save or
push the converted weights.
"""
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    """Build an ``XCLIPConfig`` for ``model_name``.

    The patch size is parsed out of the model name; "large" variants get the
    bigger text/vision/MIT dimensions and a 768-dim projection.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    """Map an original X-CLIP state-dict key to its HF ``XCLIPModel`` name."""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys in-place and split fused q/k/v projections.

    ``attn.in_proj_{weight,bias}`` tensors are chunked into separate
    q/k/v projections; ``visual_projection`` / ``text_projection`` weights
    are transposed to match ``nn.Linear``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                # original checkpoints store these as (in, out); nn.Linear expects (out, in)
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    """Load the "eating spaghetti" sample video with the requested frame count."""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Download, convert and verify an X-CLIP checkpoint.

    Args:
        model_name: key into the checkpoint URL table below.
        pytorch_dump_folder_path: if set, save the converted model there.
        push_to_hub: if True, push model, processor and slow tokenizer to the hub.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        # Google Drive links need gdown; GitHub release assets go through torch.hub.
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # position_ids buffers are generated, never stored in the checkpoint.
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs against values obtained with the original implementation.
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"], "feature_extraction_mctct": ["MCTCTFeatureExtractor"], "processing_mctct": ["MCTCTProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST", "MCTCTForCTC", "MCTCTModel", "MCTCTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :Any = seq_length snake_case_ :List[str] = is_training snake_case_ :Optional[Any] = use_attention_mask snake_case_ :Dict = use_token_type_ids snake_case_ :Union[str, Any] = use_labels snake_case_ :str = vocab_size snake_case_ :int = hidden_size snake_case_ :List[str] = num_hidden_layers snake_case_ :Dict = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Any = max_position_embeddings snake_case_ :Union[str, Any] = type_vocab_size snake_case_ :Optional[int] = type_sequence_label_size snake_case_ :Union[str, Any] = 
initializer_range snake_case_ :Tuple = num_choices def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_attention_mask: snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ :Any = None if self.use_token_type_ids: snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :str = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :int = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs snake_case_ :Union[str, Any] = True snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple 
docstring''' _A : List[str] = True _A : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = FlaxBertModelTester(self ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Dict: # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" ) snake_case_ :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
66
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = torch.device("cpu") def A_ ( ): '''simple docstring''' snake_case_ :Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ :Tuple = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ) return im def A_ ( _lowercase ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = dct.pop(_lowercase ) snake_case_ :List[str] = val def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[str] = [] for k in state_dict.keys(): snake_case_ :str = k if ".pwconv" in k: snake_case_ :List[Any] = k_new.replace(""".pwconv""", """.point_wise_conv""" ) if ".dwconv" in k: snake_case_ :int = k_new.replace(""".dwconv""", """.depth_wise_conv""" ) if ".Proj." 
in k: snake_case_ :Dict = k_new.replace(""".Proj.""", """.proj.""" ) if "patch_embed" in k_new: snake_case_ :Union[str, Any] = k_new.replace("""patch_embed""", """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: snake_case_ :Any = k_new.split(""".""" ) if ls[2].isdigit(): snake_case_ :Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: snake_case_ :Tuple = k_new.replace("""network""", """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size snake_case_ :Optional[int] = 1000 snake_case_ :Dict = """huggingface/label-files""" snake_case_ :Tuple = """imagenet-1k-id2label.json""" snake_case_ :List[Any] = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type="""dataset""" ), """r""" ) ) snake_case_ :int = {int(_lowercase ): v for k, v in idalabel.items()} snake_case_ :str = idalabel snake_case_ :int = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": snake_case_ :Dict = [3, 3, 6, 4] snake_case_ :str = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": snake_case_ :Union[str, Any] = [3, 3, 9, 6] snake_case_ :Any = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": snake_case_ :Any = [4, 3, 10, 5] snake_case_ :Union[str, Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": snake_case_ :str = [4, 4, 12, 6] snake_case_ :List[str] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): snake_case_ :str = torch.hub.load_state_dict_from_url(_lowercase, map_location="""cpu""", check_hash=_lowercase ) else: snake_case_ :List[str] = torch.load(_lowercase, 
map_location="""cpu""" ) snake_case_ :Union[str, Any] = checkpoint snake_case_ :Union[str, Any] = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase, _lowercase, _lowercase ) # load HuggingFace model snake_case_ :Optional[int] = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs snake_case_ :Any = prepare_img() snake_case_ :Optional[int] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) snake_case_ :Optional[int] = processor(images=_lowercase, return_tensors="""pt""" ) # compare outputs from both models snake_case_ :List[Any] = get_expected_output(_lowercase ) snake_case_ :int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5], _lowercase, atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swiftformer_name", default="swiftformer_xs", choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"], type=str, help="Name of the SwiftFormer model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="./converted_outputs/", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.") __a = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
66
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def lowerCAmelCase_ ( self: Tuple , snake_case: list[list[float]] , snake_case: list[int] ) -> int: snake_case_ :Any = 0.0 snake_case_ :Tuple = 0.0 for i in range(len(snake_case ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self: Optional[int] , snake_case: list[list[int | float]] , snake_case: list[int] , snake_case: int , snake_case: float ) -> list[list[int | float]]: for i in range(len(snake_case ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A_ ( ): '''simple docstring''' snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case_ :Optional[Any] = SelfOrganizingMap() snake_case_ :Dict = 3 snake_case_ :Dict = 0.5 for _ in range(_lowercase ): for j in range(len(_lowercase ) ): # training sample snake_case_ :List[Any] = training_samples[j] # Compute the winning vector snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase ) # Update the winning vector snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase ) # classify test sample snake_case_ :str = [0, 0, 0, 1] snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase ) # results print(f"""Clusters that the test sample belongs to : {winner}""" ) print(f"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
66
1
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = CTRLTokenizer _A : Union[str, Any] = False _A : Union[str, Any] = False def lowerCAmelCase_ ( self: Tuple ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ :str = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] snake_case_ :str = dict(zip(snake_case , range(len(snake_case ) ) ) ) snake_case_ :Optional[int] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] snake_case_ :Any = {"""unk_token""": """<unk>"""} snake_case_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(snake_case ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(snake_case ) ) def lowerCAmelCase_ ( self: Dict , **snake_case: Optional[int] ) -> str: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: Tuple , snake_case: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = """adapt react readapt apt""" snake_case_ :Tuple = """adapt react readapt apt""" return input_text, output_text def lowerCAmelCase_ ( self: str ) -> int: snake_case_ :int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ :Optional[Any] = """adapt react readapt apt""" snake_case_ :List[Any] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() snake_case_ :List[str] = 
tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) snake_case_ :Dict = tokens + [tokenizer.unk_token] snake_case_ :Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ 
:List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 
** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : 
List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ 
:List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def A_ ( _lowercase ): '''simple docstring''' snake_case_, snake_case_ :Any = analyze_text(_lowercase ) snake_case_ :str = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. snake_case_ :Any = sum(single_char_strings.values() ) # one length string snake_case_ :Optional[Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: snake_case_ :List[str] = single_char_strings[ch] snake_case_ :Any = my_str / all_sum my_fir_sum += prob * math.loga(_lowercase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string snake_case_ :Dict = sum(two_char_strings.values() ) snake_case_ :Optional[Any] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: snake_case_ :Dict = cha + cha if sequence in two_char_strings: snake_case_ :Union[str, Any] = two_char_strings[sequence] snake_case_ :Union[str, Any] = int(_lowercase ) / all_sum my_sec_sum += prob * math.loga(_lowercase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :int = Counter() # type: ignore snake_case_ :List[str] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0, len(_lowercase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def A_ ( ): '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. 
You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
66
"""simple docstring""" import re def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = re.compile( r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" ) return bool(re.search(_lowercase, _lowercase ) ) if __name__ == "__main__": __a = "0094702343221" print(is_sri_lankan_phone_number(phone))
66
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo __a = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" __a = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. 
According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" __a = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 
'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self: Dict ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[List[List[str]]] , snake_case: List[List[str]] , snake_case: int = 1 , snake_case: int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=snake_case , hypotheses=snake_case , min_len=snake_case , max_len=snake_case ) }
66
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __a = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A_ ( _lowercase ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :Tuple = False elif args.student_type == "gpt2": snake_case_ :Union[str, Any] = False 
def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :List[str] = False def A_ ( ): '''simple docstring''' snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", ) parser.add_argument( """--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", ) parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" ) parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", ) parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. 
Must be >=0.""" ) parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", ) parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", ) parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", ) parser.add_argument( """--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", ) parser.add_argument( """--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.""", ) parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", ) parser.add_argument( """--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", ) parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" ) parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", ) parser.add_argument( """--fp16_opt_level""", type=_lowercase, default="""O1""", help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ), ) parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" ) 
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" ) snake_case_ :Tuple = parser.parse_args() sanity_checks(_lowercase ) # ARGS # init_gpu_params(_lowercase ) set_seed(_lowercase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f: json.dump(vars(_lowercase ), _lowercase, indent=4 ) git_log(args.dump_path ) snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type] snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case_ :Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase ) snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) snake_case_ :str = special_tok_ids snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file, """rb""" ) as fp: snake_case_ :str = pickle.load(_lowercase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts, """rb""" ) as fp: snake_case_ :Optional[Any] = 
pickle.load(_lowercase ) snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case_ :Optional[int] = 0.0 # do not predict special tokens snake_case_ :int = torch.from_numpy(_lowercase ) else: snake_case_ :List[str] = None snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config ) snake_case_ :Union[str, Any] = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase ) else: snake_case_ :Optional[int] = student_model_class(_lowercase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowercase, _lowercase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowercase, _lowercase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case_ :Optional[int] = Distiller( params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if 
__name__ == "__main__": main()
66
1
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' if length <= 0 or not isinstance(_lowercase, _lowercase ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(_lowercase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
66
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Any ) -> str: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]: # configuration for running training on smdistributed Model Parallel snake_case_ :Tuple = { """enabled""": True, """processes_per_host""": 8, } snake_case_ :List[Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": 
smp_options}, """mpi""": mpi_options} snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]: # create estimator snake_case_ :List[Any] = self.create_estimator(snake_case ) # run training estimator.fit() # result dataframe snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": 
train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
1
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( "stable diffusion controlnet", "0.22.0", "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.", standard_warn=False, stacklevel=3, )
66
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict: snake_case_ :Dict = parent snake_case_ :List[Any] = batch_size snake_case_ :Dict = image_size snake_case_ :Dict = patch_size snake_case_ :Tuple = num_channels snake_case_ :List[Any] = embed_dim snake_case_ :List[str] = depths snake_case_ :str = num_heads snake_case_ :Tuple = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :int = qkv_bias snake_case_ :Tuple = hidden_dropout_prob snake_case_ :Optional[Any] = 
attention_probs_dropout_prob snake_case_ :Dict = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Any = use_absolute_embeddings snake_case_ :int = patch_norm snake_case_ :List[Any] = layer_norm_eps snake_case_ :Tuple = initializer_range snake_case_ :str = is_training snake_case_ :int = scope snake_case_ :Tuple = use_labels snake_case_ :Tuple = type_sequence_label_size snake_case_ :str = encoder_stride snake_case_ :List[Any] = out_features snake_case_ :str = out_indices def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: int ) -> Optional[Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any: snake_case_ :Dict = MaskFormerSwinModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ 
:Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case ): snake_case_ :Optional[Any] = ["""stem"""] snake_case_ :str = MaskFormerSwinBackbone(config=snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :str = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} _A : List[str] = False _A : Any = False _A : Dict = False _A : List[Any] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case_ :str = MaskFormerSwinModelTester(self ) snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ 
`nn.DataParallel`""" ) ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Any ) -> Tuple: return def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: str ) -> List[str]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = 
[*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str: snake_case_ :List[str] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :Any = outputs.hidden_states snake_case_ :Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swin has a different seq_length snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[Any] = True 
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = 3 snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Any = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: List[str] ) -> str: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: str ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case: str ): snake_case_ :Optional[int] = 0 return t def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ): with torch.no_grad(): snake_case_ 
:List[Any] = model(**snake_case , return_dict=snake_case , **snake_case ) snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple() def recursive_check(snake_case: List[Any] , snake_case: int ): if isinstance(snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ): recursive_check(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case , snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has""" f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}.""" ) , ) recursive_check(snake_case , snake_case ) for model_class in self.all_model_classes: snake_case_ :int = model_class(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , 
return_labels=snake_case ) snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ): '''simple docstring''' _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else () _A : Tuple = MaskFormerSwinConfig def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ :List[str] = backbone_class(snake_case ) backbone.to(snake_case ) backbone.eval() snake_case_ :List[Any] = backbone(**snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if 
self.has_attentions: snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case ) self.assertIsNotNone(outputs.attentions )
66
1
"""Auto classes for Flax models.

Maps model-type strings (as found in a config's ``model_type``) to the
corresponding Flax model class name, and exposes ``FlaxAutoModel*`` facade
classes built on top of those mappings.
"""
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

# NOTE(review): in the original text every constant below was assigned to a
# single repeatedly-shadowed name (`__a`) while later code referenced the
# real names, which raised NameError at import time.  Names are restored to
# match their references.

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy mappings resolve a config class to the actual Flax model class on demand.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# NOTE(review): `_model_mapping` is the attribute `_BaseAutoModelClass` reads;
# the obfuscated text stored the mapping under `_A`, which the base class
# would never find — confirm against auto_factory._BaseAutoModelClass.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
66
"""Tests for the diffusers `UNet2DModel` (unconditional, LDM and NCSN++ variants)."""
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()

# NOTE(review): the obfuscated text defined all three test classes under the
# same name (shadowing the first two so they were never collected), inherited
# from the undefined `_lowerCAmelCase`, and gave every method the same name so
# `self.dummy_input` was never defined.  Names are restored so the imported
# `ModelTesterMixin`/`UNetTesterMixin` are actually used and each test runs.


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        """A random (4, 3, 32, 32) sample and a scalar timestep on the test device."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, forward inputs) for the common mixin tests."""
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        """A random (4, 4, 32, 32) sample and a scalar timestep on the test device."""
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A random (4, 3, *sizes) sample and per-sample integer timesteps."""
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # NOTE(review): the obfuscated text used the nonexistent `torch.intaa`;
        # restored to int32 — confirm against the upstream test.
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
66
1
"""Merge LoRA weights stored in a .safetensors file into a Stable Diffusion pipeline."""
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    """Load the base pipeline and add ``alpha`` times the LoRA delta to its weights.

    Args:
        base_model_path: diffusers-format base model to load.
        checkpoint_path: ``.safetensors`` file holding the LoRA state dict.
        LORA_PREFIX_UNET: key prefix used for UNet weights in the state dict.
        LORA_PREFIX_TEXT_ENCODER: key prefix used for text-encoder weights.
        alpha: merging ratio in ``W = W0 + alpha * deltaW``.

    Returns:
        The pipeline with merged weights.
    """
    # NOTE(review): the obfuscated text declared five parameters all named
    # `_lowercase` (a SyntaxError) and called this function as `convert`;
    # names are restored to match the body's references and the call site.
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by greedily re-joining underscore-split segments
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:  # exits only via the `break` below
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: conv LoRA weights carry two trailing singleton dims
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9}, }, { """framework""": """tensorflow""", """script""": """run_tf.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple=1 ) -> Optional[int]: # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , 
metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: str ) -> Any: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) def lowerCAmelCase_ ( self: List[str] ) -> int: # create estimator snake_case_ :int = self.create_estimator() # run training estimator.fit() # result dataframe snake_case_ :List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :Union[str, Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = StableDiffusionSAGPipeline _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : List[str] = False def lowerCAmelCase_ ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) snake_case_ :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ :Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) snake_case_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_000 , ) snake_case_ :Tuple = CLIPTextModel(snake_case ) snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ :Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str: if str(snake_case ).startswith("""mps""" ): snake_case_ :Tuple = torch.manual_seed(snake_case ) else: snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ :Any = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self: Optional[int] ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Union[str, Any] = """.""" snake_case_ :str = torch.manual_seed(0 ) snake_case_ :str = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :List[Any] = output.images snake_case_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :Optional[int] = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Union[str, Any] = torch.manual_seed(0 ) snake_case_ :Tuple = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :Optional[int] = output.images snake_case_ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Optional[int] = torch.manual_seed(0 ) snake_case_ :List[str] = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) snake_case_ :Optional[Any] = output.images assert image.shape == (1, 512, 768, 3)
66
1
"""simple docstring""" import enum import shutil import sys __a , __a = shutil.get_terminal_size() __a = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} class lowerCamelCase ( enum.Enum ): '''simple docstring''' _A : Dict = 0 _A : str = 1 def A_ ( _lowercase, _lowercase="" ): '''simple docstring''' sys.stdout.write(str(_lowercase ) + end ) sys.stdout.flush() def A_ ( _lowercase, _lowercase, _lowercase="" ): '''simple docstring''' forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", _lowercase ) def A_ ( ): '''simple docstring''' forceWrite("""\r""" ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def A_ ( ): '''simple docstring''' forceWrite(""" """ * TERMINAL_WIDTH ) reset_cursor() def A_ ( ): '''simple docstring''' reset_cursor() forceWrite("""-""" * TERMINAL_WIDTH )
66
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCamelCase : '''simple docstring''' def __init__( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {} def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = {} def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None: if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ :Dict = probability def lowerCAmelCase_ ( self: List[Any] ) -> list[str]: return list(self.connections ) def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str: snake_case_ :Optional[Any] = 0 snake_case_ :List[str] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase, _lowercase, _lowercase ) snake_case_ :int = Counter(graph.get_nodes() ) snake_case_ :Optional[Any] = start for _ in range(_lowercase ): snake_case_ :Tuple = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def A_ ( _lowercase, _lowercase ): '''simple docstring''' return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(_lowercase, _lowercase ) ) ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' if dataset.ndim != value_array.ndim: snake_case_ :Tuple = ( """Wrong input data's dimensions... """ f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}""" ) raise ValueError(_lowercase ) try: if dataset.shape[1] != value_array.shape[1]: snake_case_ :Tuple = ( """Wrong input data's shape... """ f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}""" ) raise ValueError(_lowercase ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("""Wrong shape""" ) if dataset.dtype != value_array.dtype: snake_case_ :Any = ( """Input data have different datatype... """ f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}""" ) raise TypeError(_lowercase ) snake_case_ :List[Any] = [] for value in value_array: snake_case_ :Union[str, Any] = euclidean(_lowercase, dataset[0] ) snake_case_ :List[Any] = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case_ :Dict = euclidean(_lowercase, _lowercase ) if dist > temp_dist: snake_case_ :Any = temp_dist snake_case_ :Tuple = dataset_value.tolist() answer.append([vector, dist] ) return answer def A_ ( _lowercase, _lowercase ): '''simple docstring''' return np.dot(_lowercase, _lowercase ) / (norm(_lowercase ) * norm(_lowercase )) if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __a = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", 
"MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", 
_lowercase ) return [m.group(0 ) for m in matches] def A_ ( ): '''simple docstring''' snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ :Dict = { config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case_ :Optional[Any] = collections.defaultdict(_lowercase ) snake_case_ :int = collections.defaultdict(_lowercase ) snake_case_ :List[str] = collections.defaultdict(_lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(_lowercase ): snake_case_ :int = None if _re_tf_models.match(_lowercase ) is not None: snake_case_ :int = tf_models snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0] elif _re_flax_models.match(_lowercase ) is not None: snake_case_ :List[Any] = flax_models snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0] elif _re_pt_models.match(_lowercase ) is not None: snake_case_ :Optional[Any] = pt_models snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0] if lookup_dict is not None: while len(_lowercase ) > 0: if attr_name in model_prefix_to_model_type: snake_case_ :Optional[int] = True break # Try again after removing the last word in the name snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] ) snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case_ :Optional[Any] = list(_lowercase ) all_models.sort() snake_case_ :Optional[int] = {"""model_type""": all_models} snake_case_ :Optional[int] = [pt_models[t] for t in all_models] snake_case_ :Any = [tf_models[t] for t in all_models] snake_case_ :Dict = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case_ :Dict = {} for t in all_models: if t in 
transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case_ :Optional[Any] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case_ :Tuple = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case_ :Tuple = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. snake_case_ :str = """AutoTokenizer""" snake_case_ :int = [processors[t] for t in all_models] return pd.DataFrame(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ): # The type of pipeline may not exist in this framework if not hasattr(_lowercase, _lowercase ): continue # First extract all model_names snake_case_ :Tuple = [] for name in getattr(_lowercase, _lowercase ).values(): if isinstance(_lowercase, _lowercase ): model_names.append(_lowercase ) else: model_names.extend(list(_lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = get_frameworks_table() snake_case_ :str = Dataset.from_pandas(_lowercase ) snake_case_ :List[Any] = hf_hub_download( """huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase ) 
snake_case_ :List[str] = Dataset.from_json(_lowercase ) snake_case_ :int = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(_lowercase ) ) } snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case_ :Tuple = sorted(table.keys() ) snake_case_ :Tuple = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case_ :Union[str, Any] = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: snake_case_ :List[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, ) def A_ ( ): '''simple docstring''' snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS snake_case_ :List[str] = [] for key in pipeline_tasks: if key not in in_table: snake_case_ :int = pipeline_tasks[key]["""pt"""] if isinstance(_lowercase, (list, tuple) ): snake_case_ :Any = model[0] snake_case_ :str = model.__name__ if model not in in_table.values(): missing.append(_lowercase ) if len(_lowercase ) > 0: snake_case_ :Optional[int] = """, """.join(_lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f"""`utils/update_metadata.py`: 
{msg}. Please add them!""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __a = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
66
1
"""simple docstring""" from math import isclose, sqrt def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = point_y / 4 / point_x snake_case_ :Any = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) snake_case_ :Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) snake_case_ :str = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 snake_case_ :Tuple = outgoing_gradient**2 + 4 snake_case_ :List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) snake_case_ :Dict = (point_y - outgoing_gradient * point_x) ** 2 - 100 snake_case_ :Optional[Any] = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) snake_case_ :List[Any] = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point snake_case_ :Dict = x_minus if isclose(_lowercase, _lowercase ) else x_plus snake_case_ :Optional[Any] = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def A_ ( _lowercase = 1.4, _lowercase = -9.6 ): '''simple docstring''' snake_case_ :int = 0 snake_case_ :float = first_x_coord snake_case_ :float = first_y_coord snake_case_ :float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): snake_case_, snake_case_, snake_case_ :List[Any] = next_point(_lowercase, _lowercase, _lowercase ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
66
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __a = logging.getLogger(__name__) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Union[str, Any] = """token-classification""" def __init__( self: Any , snake_case: Tuple ) -> List[Any]: if type(snake_case ) == dict: snake_case_ :Optional[int] = Namespace(**snake_case ) snake_case_ :Optional[int] = import_module("""tasks""" ) try: snake_case_ :Any = getattr(snake_case , hparams.task_type ) snake_case_ :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels ) snake_case_ :str = CrossEntropyLoss().ignore_index super().__init__(snake_case , len(self.labels ) , self.mode ) def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any: return self.model(**snake_case ) def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]: snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :List[str] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Optional[Any] = self(**snake_case ) snake_case_ :List[str] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_ :List[Any] = self.hparams for mode in ["train", "dev", "test"]: snake_case_ :Optional[int] = self._feature_file(snake_case ) if os.path.exists(snake_case ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :Optional[int] = torch.load(snake_case ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case ) snake_case_ :Any = self.token_classification_task.convert_examples_to_features( snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , 
pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , snake_case ) torch.save(snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader: snake_case_ :int = self._feature_file(snake_case ) logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :str = torch.load(snake_case ) snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]: """Compute validation""" "" snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :Dict = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Dict = self(**snake_case ) snake_case_, snake_case_ :Dict = outputs[:2] snake_case_ :Union[str, Any] = logits.detach().cpu().numpy() snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple: snake_case_ :Union[str, 
Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean() snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) snake_case_ :Tuple = np.argmax(snake_case , axis=2 ) snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) snake_case_ :Optional[Any] = dict(enumerate(self.labels ) ) snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case_ :str = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(snake_case , snake_case ), """precision""": precision_score(snake_case , snake_case ), """recall""": recall_score(snake_case , snake_case ), """f1""": fa_score(snake_case , snake_case ), } snake_case_ :List[Any] = dict(results.items() ) snake_case_ :Union[str, Any] = results return ret, preds_list, out_label_list def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]: # when stable snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case ) snake_case_ :str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any: # updating to test_epoch_end instead of deprecated test_end snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case_ :Optional[int] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase_ ( 
snake_case: Any , snake_case: int ) -> Dict: # Add NER specific options BaseTransformer.add_model_specific_args(snake_case , snake_case ) parser.add_argument( """--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=snake_case , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __a = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __a = NERTransformer.add_model_specific_args(parser, os.getcwd()) __a = parser.parse_args() __a = NERTransformer(args) __a = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __a = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
66
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase, _lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :str = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case_ :Optional[Any] = s_dict.pop(_lowercase ) elif "subsample" in key: snake_case_ :int = s_dict.pop(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_, snake_case_ :List[str] = emb.weight.shape snake_case_ :Union[str, Any] = nn.Linear(_lowercase, _lowercase, bias=_lowercase ) snake_case_ :Dict = emb.weight.data return lin_layer def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Any = torch.load(_lowercase, map_location="""cpu""" ) snake_case_ :List[Any] = mam_aaa["""args"""] snake_case_ :int = mam_aaa["""model"""] snake_case_ :List[Any] = state_dict["""decoder.output_projection.weight"""] remove_ignore_keys_(_lowercase ) rename_keys(_lowercase ) snake_case_ :Dict = state_dict["""decoder.embed_tokens.weight"""].shape[0] snake_case_ :str = args.share_decoder_input_output_embed snake_case_ :Optional[Any] = [int(_lowercase ) for i in args.conv_kernel_sizes.split(""",""" )] snake_case_ :Optional[Any] = SpeechaTextConfig( vocab_size=_lowercase, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, 
decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""relu""", num_conv_layers=len(_lowercase ), conv_channels=args.conv_channels, conv_kernel_sizes=_lowercase, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=_lowercase, num_beams=5, max_length=200, use_cache=_lowercase, decoder_start_token_id=2, early_stopping=_lowercase, ) snake_case_ :Optional[Any] = SpeechaTextForConditionalGeneration(_lowercase ) snake_case_, snake_case_ :Union[str, Any] = model.model.load_state_dict(_lowercase, strict=_lowercase ) if len(_lowercase ) > 0 and not set(_lowercase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case_ :Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case_ :Optional[int] = lm_head_weights model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") __a = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
66
"""simple docstring""" from math import factorial class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple: snake_case_ :List[Any] = real if isinstance(snake_case , snake_case ): snake_case_ :Tuple = [1] * rank else: snake_case_ :Optional[Any] = rank def __repr__( self: List[str] ) -> Tuple: return ( f"""{self.real}+""" f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: snake_case_ :Any = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case ) def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]: if not isinstance(snake_case , snake_case ): return Dual(self.real + other , self.duals ) snake_case_ :List[Any] = self.duals.copy() snake_case_ :Tuple = other.duals.copy() if len(snake_case ) > len(snake_case ): o_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) elif len(snake_case ) < len(snake_case ): s_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) snake_case_ :Dict = [] for i in range(len(snake_case ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case ) _A : str = __add__ def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple: return self + other * -1 def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]: if not isinstance(snake_case , snake_case ): snake_case_ :Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case ) snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case ) _A : int = 
__mul__ def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case ) raise ValueError def __floordiv__( self: int , snake_case: List[Any] ) -> Any: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[int] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case ) raise ValueError def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]: if n < 0 or isinstance(snake_case , snake_case ): raise ValueError("""power must be a positive integer""" ) if n == 0: return 1 if n == 1: return self snake_case_ :str = self for _ in range(n - 1 ): x *= self return x def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if not callable(_lowercase ): raise ValueError("""differentiate() requires a function as input for func""" ) if not isinstance(_lowercase, (float, int) ): raise ValueError("""differentiate() requires a float as input for position""" ) if not isinstance(_lowercase, _lowercase ): raise ValueError("""differentiate() requires an int as input for order""" ) snake_case_ :Optional[Any] = Dual(_lowercase, 1 ) snake_case_ :List[Any] = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def A_ ( _lowercase ): '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
66
1
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' def __init__( self: Tuple , snake_case: Optional[int] , snake_case: Any=13 , snake_case: Dict=7 , snake_case: Union[str, Any]=True , snake_case: Any=True , snake_case: Tuple=True , snake_case: List[Any]=True , snake_case: Dict=99 , snake_case: List[str]=32 , snake_case: int=5 , snake_case: Optional[int]=4 , snake_case: Any=37 , snake_case: Dict="gelu" , snake_case: Optional[int]=0.1 , snake_case: Optional[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[Any]=16 , snake_case: str=2 , snake_case: int=0.0_2 , snake_case: List[str]=False , snake_case: Any=True , snake_case: Optional[int]="None" , snake_case: int=3 , snake_case: Dict=4 , snake_case: Dict=None , ) -> int: snake_case_ :List[str] = parent snake_case_ :Optional[Any] = batch_size snake_case_ :List[str] = seq_length snake_case_ :Union[str, Any] = is_training snake_case_ :Union[str, Any] = use_input_mask snake_case_ :Union[str, Any] = use_token_type_ids snake_case_ :int = use_labels snake_case_ :List[str] = vocab_size snake_case_ :List[str] = hidden_size snake_case_ :int = num_hidden_layers snake_case_ :Any = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :List[str] = hidden_act snake_case_ :List[str] = hidden_dropout_prob snake_case_ :Tuple = 
attention_probs_dropout_prob snake_case_ :List[Any] = max_position_embeddings snake_case_ :Optional[int] = type_vocab_size snake_case_ :Union[str, Any] = type_sequence_label_size snake_case_ :List[str] = initializer_range snake_case_ :str = num_labels snake_case_ :Tuple = num_choices snake_case_ :List[Any] = relative_attention snake_case_ :str = position_biased_input snake_case_ :List[Any] = pos_att_type snake_case_ :Optional[Any] = scope def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_input_mask: snake_case_ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) snake_case_ :str = None if self.use_token_type_ids: snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = None snake_case_ :Tuple = None snake_case_ :Optional[Any] = None if self.use_labels: snake_case_ :int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ :Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Optional[Any] ) -> str: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , 
position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowerCAmelCase_ ( self: List[str] ) -> Any: snake_case_ :Tuple = self.get_config() snake_case_ :Tuple = 300 return config def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[str] ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowerCAmelCase_ ( self: Dict , snake_case: Any , snake_case: Dict , snake_case: Any , snake_case: Optional[Any] , snake_case: Tuple , snake_case: str , snake_case: str ) -> List[str]: snake_case_ :Union[str, Any] = DebertaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )[0] snake_case_ :List[Any] = model(snake_case , token_type_ids=snake_case )[0] snake_case_ :int = model(snake_case )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: Union[str, Any] , snake_case: Optional[Any] , snake_case: int , snake_case: Dict , snake_case: Dict , snake_case: Dict ) -> Tuple: snake_case_ :Any = DebertaForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: str , snake_case: Any , snake_case: Optional[int] , snake_case: Any , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]: snake_case_ :List[Any] = self.num_labels snake_case_ :str = DebertaForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(snake_case ) def lowerCAmelCase_ ( self: Any , snake_case: Dict , snake_case: Dict , snake_case: Optional[int] , snake_case: Dict , snake_case: int , snake_case: str , snake_case: Tuple ) -> List[Any]: snake_case_ :Tuple = self.num_labels snake_case_ :Union[str, Any] = DebertaForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: Optional[int] , snake_case: int , snake_case: List[str] , snake_case: Tuple , snake_case: Dict , snake_case: Optional[int] , snake_case: Tuple ) -> List[str]: snake_case_ :List[Any] = DebertaForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :List[Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self: Tuple ) -> int: snake_case_ :Optional[Any] = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) :str = config_and_inputs snake_case_ :List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[int] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, 
DebertaForQuestionAnswering, ) if is_torch_available() else () ) _A : str = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) _A : Any = True _A : Dict = False _A : Any = False _A : str = False _A : str = False def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]: snake_case_ :Union[str, Any] = DebertaModelTester(self ) snake_case_ :Union[str, Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def lowerCAmelCase_ ( self: int ) -> Dict: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case_ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> str: snake_case_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*snake_case ) def lowerCAmelCase_ ( self: str ) -> int: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :Dict = DebertaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) 
@require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""" ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: pass @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case_ :Optional[int] = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) snake_case_ :Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) snake_case_ :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case_ :int = model(snake_case , attention_mask=snake_case )[0] # compare the actual values for a slice. snake_case_ :Union[str, Any] = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
66
"""simple docstring""" from __future__ import annotations __a = 10 def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = 1 snake_case_ :List[str] = max(_lowercase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ :list[list] = [[] for _ in range(_lowercase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ :Any = int((i / placement) % RADIX ) buckets[tmp].append(_lowercase ) # put each buckets' contents into list_of_ints snake_case_ :Optional[Any] = 0 for b in range(_lowercase ): for i in buckets[b]: snake_case_ :Union[str, Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def A_ ( _lowercase ): '''simple docstring''' snake_case_ :int = {} snake_case_ :List[Any] = job["""started_at"""] snake_case_ :int = job["""completed_at"""] snake_case_ :str = date_parser.parse(_lowercase ) snake_case_ :Tuple = date_parser.parse(_lowercase ) snake_case_ :Optional[int] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) snake_case_ :int = start snake_case_ :Optional[int] = end snake_case_ :Optional[Any] = duration_in_min return job_info def A_ ( _lowercase, _lowercase=None ): '''simple docstring''' snake_case_ :Optional[Any] = None if token is not None: snake_case_ :Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} snake_case_ :Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" snake_case_ :Optional[int] = requests.get(_lowercase, headers=_lowercase ).json() snake_case_ :Optional[Any] = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(_lowercase ) for job in result["""jobs"""]} ) snake_case_ :int = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(_lowercase ): snake_case_ :Union[str, Any] = requests.get(url + f"""&page={i + 2}""", headers=_lowercase ).json() job_time.update({job["""name"""]: extract_time_from_single_job(_lowercase ) for job in result["""jobs"""]} ) return job_time except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") __a = parser.parse_args() __a = get_job_time(args.workflow_run_id) __a = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in 
job_time.items(): print(F"""{k}: {v['duration']}""")
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" from math import isqrt, loga def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Tuple = [True] * max_number for i in range(2, isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2, _lowercase, _lowercase ): snake_case_ :Dict = False return [i for i in range(2, _lowercase ) if is_prime[i]] def A_ ( _lowercase = 800800, _lowercase = 800800 ): '''simple docstring''' snake_case_ :Union[str, Any] = degree * loga(_lowercase ) snake_case_ :Tuple = int(_lowercase ) snake_case_ :List[str] = calculate_prime_numbers(_lowercase ) snake_case_ :Union[str, Any] = 0 snake_case_ :List[str] = 0 snake_case_ :Optional[Any] = len(_lowercase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
66
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :Union[str, Any] = controlnet_params snake_case_ :Union[str, Any] = """bird""" snake_case_ :List[Any] = jax.device_count() snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case_ :Any = jax.random.PRNGKey(0 ) snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() ) snake_case_ :List[Any] = replicate(snake_case ) snake_case_ :List[str] = shard(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :Dict = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1] snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Dict = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :str = controlnet_params snake_case_ :Optional[int] = """Chef in the kitchen""" snake_case_ :Union[str, Any] = jax.device_count() snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case_ :str = jax.random.PRNGKey(0 ) snake_case_ :str = jax.random.split(snake_case , jax.device_count() ) snake_case_ :Tuple = replicate(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :int = shard(snake_case ) snake_case_ :List[str] = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ :int = images[0, 253:256, 253:256, -1] snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Optional[int] = jnp.array( [[0.2_7_1_4_8_4, 
0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
66
1
"""Convert fairseq BART checkpoints (hub names or a local model.pt) to the Hugging Face format."""

import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}

if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

# (fairseq key, transformers key) pairs for the MNLI classification head.
mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no Hugging Face counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Load a local fairseq ``model.pt`` into the ``bart.large.cnn`` hub architecture."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    """Build an ``lm_head`` Linear layer whose weights are tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the transformers structure, verify that
    both models produce identical outputs, and save under ``pytorch_dump_folder_path``.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    # Sanity-check tokenization agreement before touching weights.
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
66
"""Lazy import structure for the MobileBERT model family."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public symbols; extended below depending on available backends.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Run Stable Diffusion on CPU with Intel Extension for PyTorch (IPEX) bfloat16 optimizations."""

import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex; the sample input lets IPEX trace the UNet, but is optional.
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute with a fixed seed so runs are reproducible
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
66
"""Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict."""

import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    """
    Read every tensor from the TensorFlow checkpoint in ``args.tf_model_dir``, rename and
    reshape it to the Hugging Face GPTSAN layout, and save the state dict to ``args.output``
    (a ``.pt`` suffix is appended if missing).
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # Optimizer slots are not model weights.
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        # In Mesh-Tensorflow all experts are stacked in one array, so split per expert.
                        state = vnp[i].transpose([1, 0]).copy()
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # The token embedding doubles as the LM head.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
66
1
"""NLLB-MoE model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    """
    Configuration class for an NLLB-MoE model. Defaults mirror facebook/nllb-moe-54b.

    Stores encoder/decoder sizes, dropout rates, and the Mixture-of-Experts routing
    hyper-parameters (number of experts, capacity, router loss coefficients, etc.).
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
66
"""Train a small LSTM on a univariate CSV time series and predict the held-out tail."""

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # window length fed to the LSTM
    forward_days = 5  # horizon predicted per window
    periods = 20  # number of look_back-sized periods held out for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Overlap the last look_back points so the first test window has full history.
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
66
1
"""Multiprocess-aware logging utilities for accelerate."""

import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should
    be called on all processes or only the main executed one. Default is `True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Return True when this process is allowed to emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log.

        Accepts a new kwarg `main_process_only` (default True) which will tell whether
        this will be logged only on the main process, and `in_order` (default False)
        which logs on each process in rank order, synchronizing between ranks.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    """
    Return a `MultiProcessAdapter` wrapping `logging.getLogger(name)`.

    If `log_level` is None, the `ACCELERATE_LOG_LEVEL` environment variable is used
    as the level (when set); the level is applied to both the logger and the root logger.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
66
"""Lazy import structure for the AltCLIP model family."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public symbols; extended below when torch is available.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Unit tests for diffusers' UNet2DModel (DDPM, LDM and NCSN++ variants)."""

import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # NOTE(review): the original used an integer dtype here; int32 assumed — confirm upstream.
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
66
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = XCLIPTextConfig() # derive patch size from model name snake_case_ :Union[str, Any] = model_name.find("""patch""" ) snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase ) if "large" in model_name: snake_case_ :Optional[Any] = 768 snake_case_ :Union[str, Any] = 3072 snake_case_ :Any = 12 snake_case_ :Any = 1024 snake_case_ :str = 4096 snake_case_ :Union[str, Any] = 16 snake_case_ :Union[str, Any] = 24 snake_case_ :Tuple = 768 snake_case_ :Any = 3072 if model_name == "xclip-large-patch14-16-frames": snake_case_ :Any = 336 snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase ) if "large" in model_name: snake_case_ :List[Any] = 768 return config def A_ ( _lowercase ): '''simple docstring''' if name == "token_embedding.weight": snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: snake_case_ :str = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: snake_case_ :int = name.replace("""c_proj""", """fc2""" ) if name.startswith("""transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", 
"""text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" ) if "ln_final" in name: snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" ) if "visual.conv1" in name: snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" ) if "visual.proj" in name: snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" ) if "text_projection" in name: snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" ) if "prompts_visual_ln" in name: snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": snake_case_ :str = name.replace("""positional""", """position""" ) if name.startswith("""mit.resblocks""" ): snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): 
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" ) return name def A_ ( _lowercase, _lowercase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ :Dict = orig_state_dict.pop(_lowercase ) if "attn.in_proj" in key: snake_case_ :Optional[Any] = key.split(""".""" ) if key.startswith("""visual""" ): snake_case_ :Any = key_split[3] snake_case_ :Optional[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ :str = val[ :dim, : ] snake_case_ :Optional[int] = val[ dim : dim * 2, : ] snake_case_ :Union[str, Any] = val[ -dim:, : ] else: snake_case_ :Dict = val[ :dim ] snake_case_ :Optional[int] = val[ dim : dim * 2 ] snake_case_ :Optional[int] = val[ -dim: ] else: if "weight" in key: snake_case_ :Optional[Any] = val[ :dim, : ] snake_case_ :List[str] = val[ dim : dim * 2, : ] snake_case_ :Dict = val[ -dim:, : ] else: snake_case_ :Union[str, Any] = val[:dim] snake_case_ :Union[str, Any] = val[ dim : dim * 2 ] snake_case_ :Union[str, Any] = val[-dim:] elif key.startswith("""mit""" ): snake_case_ :Tuple = key_split[2] snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ :Optional[int] = val[:dim, :] snake_case_ :Optional[int] = val[dim : dim * 2, :] snake_case_ :str = val[-dim:, :] else: snake_case_ :str = val[:dim] snake_case_ :Any = val[dim : dim * 2] snake_case_ :int = val[-dim:] else: snake_case_ :Tuple = key_split[2] snake_case_ :Any = config.text_config.hidden_size if "weight" in key: snake_case_ :Dict = val[:dim, :] snake_case_ :Dict = val[ dim : dim * 2, : ] snake_case_ :List[str] = val[-dim:, :] else: snake_case_ :Any = val[:dim] snake_case_ :Tuple = val[ dim : dim * 2 ] snake_case_ :List[str] = val[-dim:] else: snake_case_ :Optional[int] = rename_key(_lowercase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ :Optional[Any] = val.T snake_case_ :Tuple = 
val return orig_state_dict def A_ ( _lowercase ): '''simple docstring''' if num_frames == 8: snake_case_ :str = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: snake_case_ :int = """eating_spaghetti.npy""" elif num_frames == 32: snake_case_ :List[str] = """eating_spaghetti_32_frames.npy""" snake_case_ :int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", ) snake_case_ :Union[str, Any] = np.load(_lowercase ) return list(_lowercase ) def A_ ( _lowercase, _lowercase=None, _lowercase=False ): '''simple docstring''' snake_case_ :List[Any] = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": 
"""https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } snake_case_ :Optional[int] = model_to_url[model_name] snake_case_ :int = 8 if "16-frames" in model_name: snake_case_ :List[Any] = 16 elif "shot" in model_name: snake_case_ :Dict = 32 snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase ) snake_case_ :Optional[Any] = XCLIPModel(_lowercase ) model.eval() if "drive" in checkpoint_url: snake_case_ :List[str] = """pytorch_model.bin""" gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase ) snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""] else: snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""] snake_case_ :Union[str, Any] = 
convert_state_dict(_lowercase, _lowercase ) snake_case_ :str = XCLIPModel(_lowercase ) snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase ) snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase ) snake_case_ :Optional[int] = prepare_video(_lowercase ) snake_case_ :Optional[Any] = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase ) print("""Shape of pixel values:""", inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ :List[Any] = model(**_lowercase ) # Verify outputs snake_case_ :List[Any] = outputs.logits_per_video snake_case_ :Any = logits_per_video.softmax(dim=1 ) print("""Probs:""", _lowercase ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] ) elif model_name == "xclip-base-patch16": snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] ) elif model_name == "xclip-large-patch14": snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] ) else: raise ValueError(f"""Model name {model_name} not supported""" ) assert torch.allclose(_lowercase, _lowercase, atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) 
model.push_to_hub(_lowercase, organization="""nielsr""" ) processor.push_to_hub(_lowercase, organization="""nielsr""" ) slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __a = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
66
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __a = logging.get_logger(__name__) __a = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' def __init__( self: List[str] , snake_case: int=None , snake_case: str=None , *snake_case: Any , **snake_case: List[Any] ) -> Optional[int]: super().__init__(*snake_case , **snake_case ) if config is None: assert isinstance(self.model , snake_case ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f""" {self.model.__class__}""" ) snake_case_ :Optional[Any] = self.model.config else: snake_case_ :int = config snake_case_ :List[Any] = data_args snake_case_ :str = self.config.tgt_vocab_size if isinstance(self.config , snake_case ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and 
self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for""" """ padding..""" ) if self.args.label_smoothing == 0: snake_case_ :Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case_ :Tuple = label_smoothed_nll_loss def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int ) -> str: if self.optimizer is None: snake_case_ :List[Any] = ["""bias""", """LayerNorm.weight"""] snake_case_ :Union[str, Any] = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] snake_case_ :str = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case_ :Optional[int] = Adafactor snake_case_ :Dict = {"""scale_parameter""": False, """relative_step""": False} else: snake_case_ :Optional[Any] = AdamW snake_case_ :Optional[Any] = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } snake_case_ :Any = self.args.learning_rate if self.sharded_ddp: snake_case_ :List[str] = OSS( params=snake_case , optim=snake_case , **snake_case , ) else: snake_case_ :Optional[int] = optimizer_cls(snake_case , **snake_case ) if self.lr_scheduler is None: snake_case_ :List[Any] = self._get_lr_scheduler(snake_case ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def 
lowerCAmelCase_ ( self: Tuple , snake_case: Optional[Any] ) -> str: snake_case_ :int = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case_ :List[str] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case_ :Any = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case_ :int = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case ) return scheduler def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: Optional[int] , snake_case: Union[str, Any] ) -> Optional[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case_ :Union[str, Any] = model(**snake_case , use_cache=snake_case )[0] snake_case_ :Tuple = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case_, snake_case_ :Any = model(**snake_case , labels=snake_case , use_cache=snake_case )[:2] else: # compute label smoothed loss snake_case_ :List[Any] = model(**snake_case , use_cache=snake_case )[0] snake_case_ :Any = torch.nn.functional.log_softmax(snake_case , dim=-1 ) snake_case_, snake_case_ :List[str] = self.loss_fn(snake_case , snake_case , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, 
logits def lowerCAmelCase_ ( self: str , snake_case: List[Any] , snake_case: List[Any] ) -> List[Any]: snake_case_ :int = inputs.pop("""labels""" ) snake_case_, snake_case_ :Any = self._compute_loss(snake_case , snake_case , snake_case ) return loss def lowerCAmelCase_ ( self: List[Any] , snake_case: nn.Module , snake_case: Dict[str, Union[torch.Tensor, Any]] , snake_case: bool , snake_case: Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case_ :Optional[int] = self._prepare_inputs(snake_case ) snake_case_ :Optional[int] = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case_ :Union[str, Any] = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **snake_case , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case_ :Optional[int] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["""max_length"""] ) snake_case_ :str = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data snake_case_, snake_case_ :str = self._compute_loss(snake_case , snake_case , snake_case ) snake_case_ :Optional[int] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case_ :Optional[int] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case_ :List[Any] = self._pad_tensors_to_max_len(snake_case , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self: str , snake_case: List[Any] , snake_case: Optional[int] ) -> int: # If PAD token is not defined at least EOS token has to be defined 
snake_case_ :List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f""" padded to `max_length`={max_length}""" ) snake_case_ :List[str] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case_ :str = tensor return padded_tensor
66
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :Any = seq_length snake_case_ :List[str] = is_training snake_case_ :Optional[Any] = use_attention_mask snake_case_ :Dict = use_token_type_ids snake_case_ :Union[str, Any] = use_labels snake_case_ :str = vocab_size snake_case_ :int = hidden_size snake_case_ :List[str] = num_hidden_layers snake_case_ :Dict = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Any = max_position_embeddings snake_case_ :Union[str, Any] = type_vocab_size snake_case_ :Optional[int] = type_sequence_label_size snake_case_ :Union[str, Any] = 
initializer_range snake_case_ :Tuple = num_choices def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_attention_mask: snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ :Any = None if self.use_token_type_ids: snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :str = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :int = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs snake_case_ :Union[str, Any] = True snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple 
docstring''' _A : List[str] = True _A : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = FlaxBertModelTester(self ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Dict: # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" ) snake_case_ :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
66
1
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __a = TypeVar("T") class lowerCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self: Optional[int] , snake_case: list[T] , snake_case: Callable[[T, T], T] ) -> None: snake_case_ :Any | T = None snake_case_ :int = len(snake_case ) snake_case_ :list[T] = [any_type for _ in range(self.N )] + arr snake_case_ :Union[str, Any] = fnc self.build() def lowerCAmelCase_ ( self: Tuple ) -> None: for p in range(self.N - 1 , 0 , -1 ): snake_case_ :str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: T ) -> None: p += self.N snake_case_ :Tuple = v while p > 1: snake_case_ :Any = p // 2 snake_case_ :str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self: str , snake_case: int , snake_case: int ) -> T | None: # noqa: E741 snake_case_, snake_case_ :Tuple = l + self.N, r + self.N snake_case_ :T | None = None while l <= r: if l % 2 == 1: snake_case_ :Tuple = self.st[l] if res is None else self.fn(snake_case , self.st[l] ) if r % 2 == 0: snake_case_ :Optional[Any] = self.st[r] if res is None else self.fn(snake_case , self.st[r] ) snake_case_, snake_case_ :Dict = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __a = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __a = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __a = SegmentTree(test_array, min) __a = SegmentTree(test_array, max) __a = SegmentTree(test_array, lambda a, b: a + b) def A_ ( ): '''simple docstring''' for i in range(len(_lowercase ) ): for j in range(_lowercase, len(_lowercase ) ): snake_case_ :Tuple = reduce(_lowercase, test_array[i : j + 1] ) snake_case_ :Union[str, Any] = reduce(_lowercase, test_array[i : j + 1] ) snake_case_ :Optional[Any] = reduce(lambda _lowercase, _lowercase : a + b, 
test_array[i : j + 1] ) assert min_range == min_segment_tree.query(_lowercase, _lowercase ) assert max_range == max_segment_tree.query(_lowercase, _lowercase ) assert sum_range == sum_segment_tree.query(_lowercase, _lowercase ) test_all_segments() for index, value in test_updates.items(): __a = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
66
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def lowerCAmelCase_ ( self: Tuple , snake_case: list[list[float]] , snake_case: list[int] ) -> int: snake_case_ :Any = 0.0 snake_case_ :Tuple = 0.0 for i in range(len(snake_case ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self: Optional[int] , snake_case: list[list[int | float]] , snake_case: list[int] , snake_case: int , snake_case: float ) -> list[list[int | float]]: for i in range(len(snake_case ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A_ ( ): '''simple docstring''' snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case_ :Optional[Any] = SelfOrganizingMap() snake_case_ :Dict = 3 snake_case_ :Dict = 0.5 for _ in range(_lowercase ): for j in range(len(_lowercase ) ): # training sample snake_case_ :List[Any] = training_samples[j] # Compute the winning vector snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase ) # Update the winning vector snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase ) # classify test sample snake_case_ :str = [0, 0, 0, 1] snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase ) # results print(f"""Clusters that the test sample belongs to : {winner}""" ) print(f"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
66
1
"""Least-significant-digit (LSD) radix sort for non-negative integers.

Bug fixes vs. the mangled original: buckets were built with
``range(<input list>)`` (TypeError), the loop referenced an undefined
``RADIX`` constant, the digit was extracted with float division (inexact
for very large ints), and ``max()`` crashed on an empty input.
"""
from __future__ import annotations

__a = 10  # kept for backward compatibility with the original module constant
RADIX = 10  # base of the digit decomposition


def A_(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place with base-10 LSD radix sort.

    Returns the same (mutated) list.

    >>> A_([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    if not list_of_ints:
        # max() below would raise ValueError on an empty sequence.
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit;
        # // keeps everything in exact integer arithmetic
        for value in list_of_ints:
            buckets[(value // placement) % RADIX].append(value)
        # put each bucket's contents back into list_of_ints, in order
        index = 0
        for bucket in buckets:
            for value in bucket:
                list_of_ints[index] = value
                index += 1
        # move to next digit position
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ 
:List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 
** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : 
List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ 
:List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1
"""Validate dotted-quad IPv4 addresses.

Bug fixes vs. the mangled original: the body referenced an undefined
``ip_va_address`` name, converted the *whole* address string with
``int(...)`` instead of each octet, length-checked the raw string rather
than the octet list, used 254 instead of the correct octet maximum 255,
and the demo guard called undefined names.
"""


def A_(_lowercase: str) -> bool:
    """Return True if ``_lowercase`` is a valid IPv4 address.

    A valid address is four dot-separated decimal octets, each in 0..255.
    ``str.isdigit`` filters out signs and non-numeric parts, so anything
    with fewer than four plain-number octets fails the length check.
    """
    octets = [int(part) for part in _lowercase.split(".") if part.isdigit()]
    # 255 is the largest legal octet (e.g. the broadcast address 255.255.255.255).
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if A_(ip) else "invalid"
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
66
"""Validate Sri Lankan mobile phone numbers.

Bug fix vs. the mangled original: the compiled pattern was built and then
never used — ``re.search(_lowercase, _lowercase)`` searched the *input*
as a regex in itself, so almost any plain digit string validated. The
demo guard also referenced undefined names.
"""
import re


def A_(_lowercase: str) -> bool:
    """Return True if ``_lowercase`` is a valid Sri Lankan mobile number.

    Accepted shape: trunk/country prefix (0, 94, +94 or 0094), then 7 and a
    carrier digit (0-2, 4-8), an optional single separator ('-' or ' '),
    and seven subscriber digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # trunk '0' or country code 94 / +94 / 0094
        r"7(0|1|2|4|5|6|7|8)"  # mobile prefix 7x ('73' / '79' blocks excluded here)
        r"(-| |)"  # at most one separator
        r"\d{7}$"  # seven-digit subscriber number
    )
    return bool(pattern.search(_lowercase))


if __name__ == "__main__":
    phone = "0094702343221"
    print(A_(phone))
66
1
"""SentencePiece-style Unigram tokenizer built on the HuggingFace `tokenizers` library.

Bug fixes vs. the mangled original: ``__init__`` declared five parameters
all named ``snake_case`` (a SyntaxError — duplicate argument names), all
three methods shared the name ``lowerCAmelCase_`` (the later definitions
shadowed the earlier ones), the methods called ``self.add_unk_id()`` and
used ``self.special_tokens_list`` which were never defined, the
normalizer/pre-tokenizer/decoder/post-processor were assigned to throwaway
locals instead of the ``Tokenizer`` object, and the ``unk_id`` write in
the serialized model was lost.
"""
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class lowerCamelCase(BaseTokenizer):
    """Unigram tokenizer mimicking SentencePiece defaults (metaspace, NFKC, digit split)."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ) -> None:
        # Reserved special-token ids: pad=0, eos=1, unk=2.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Place each token at its fixed id so the trainer assigns matching ids.
        self.special_tokens_list: List = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                # Collapse runs of spaces to a single space.
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append EOS to every single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ) -> None:
        """Train the Unigram model on one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8_000,
        show_progress: bool = True,
    ) -> None:
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self) -> None:
        """Patch the serialized model so ``<unk>`` resolves to its reserved id.

        The Rust trainer does not expose ``unk_id`` directly, so we round-trip
        through the JSON representation.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
66
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __a = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A_ ( _lowercase ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :Tuple = False elif args.student_type == "gpt2": snake_case_ :Union[str, Any] = False 
def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :List[str] = False def A_ ( ): '''simple docstring''' snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", ) parser.add_argument( """--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", ) parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" ) parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", ) parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. 
Must be >=0.""" ) parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", ) parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", ) parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", ) parser.add_argument( """--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", ) parser.add_argument( """--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.""", ) parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", ) parser.add_argument( """--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", ) parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" ) parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", ) parser.add_argument( """--fp16_opt_level""", type=_lowercase, default="""O1""", help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ), ) parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" ) 
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" ) snake_case_ :Tuple = parser.parse_args() sanity_checks(_lowercase ) # ARGS # init_gpu_params(_lowercase ) set_seed(_lowercase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f: json.dump(vars(_lowercase ), _lowercase, indent=4 ) git_log(args.dump_path ) snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type] snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case_ :Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase ) snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) snake_case_ :str = special_tok_ids snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file, """rb""" ) as fp: snake_case_ :str = pickle.load(_lowercase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts, """rb""" ) as fp: snake_case_ :Optional[Any] = 
pickle.load(_lowercase ) snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case_ :Optional[int] = 0.0 # do not predict special tokens snake_case_ :int = torch.from_numpy(_lowercase ) else: snake_case_ :List[str] = None snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config ) snake_case_ :Union[str, Any] = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase ) else: snake_case_ :Optional[int] = student_model_class(_lowercase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowercase, _lowercase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowercase, _lowercase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case_ :Optional[int] = Distiller( params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if 
__name__ == "__main__": main()
66
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __a = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models snake_case_ :List[Any] = """lm_head""" snake_case_ :Union[str, Any] = getattr(_lowercase, _lowercase ) if weight_type is not None: snake_case_ :str = getattr(_lowercase, _lowercase ).shape else: snake_case_ :Optional[Any] = hf_pointer.shape assert 
hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ :Any = value elif weight_type == "weight_g": snake_case_ :Tuple = value elif weight_type == "weight_v": snake_case_ :Optional[int] = value elif weight_type == "bias": snake_case_ :Tuple = value else: snake_case_ :List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = [] snake_case_ :Union[str, Any] = fairseq_model.state_dict() snake_case_ :str = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): snake_case_ :Dict = False if "conv_layers" in name: load_conv_layer( _lowercase, _lowercase, _lowercase, _lowercase, hf_model.config.feat_extract_norm == """group""", ) snake_case_ :Dict = True else: for key, mapped_key in MAPPING.items(): snake_case_ :List[str] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ :List[str] = True if "*" in mapped_key: snake_case_ :Tuple = name.split(_lowercase )[0].split(""".""" )[-2] snake_case_ :Tuple = mapped_key.replace("""*""", _lowercase ) if "weight_g" in name: snake_case_ :Dict = """weight_g""" elif "weight_v" in name: snake_case_ :Dict = """weight_v""" elif "bias" in name: snake_case_ :Optional[Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ :List[str] = """weight""" else: snake_case_ :Optional[Any] = None set_recursively(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def A_ ( _lowercase, _lowercase, _lowercase, 
_lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = full_name.split("""conv_layers.""" )[-1] snake_case_ :List[str] = name.split(""".""" ) snake_case_ :Any = int(items[0] ) snake_case_ :str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ :Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ :List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case_ :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowercase ) @torch.no_grad() def A_ ( _lowercase, _lowercase, _lowercase=None, _lowercase=None, _lowercase=True ): '''simple docstring''' if config_path is not None: snake_case_ :str = UniSpeechConfig.from_pretrained(_lowercase ) else: snake_case_ :Tuple = UniSpeechConfig() if is_finetuned: if dict_path: snake_case_ :Optional[int] = Dictionary.load_from_json(_lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ :Optional[int] = target_dict.pad_index snake_case_ :Optional[int] = target_dict.bos_index snake_case_ :Dict = target_dict.eos_index snake_case_ :List[str] = len(target_dict.symbols ) snake_case_ :int = os.path.join(_lowercase, """vocab.json""" ) if not os.path.isdir(_lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowercase ) ) return os.makedirs(_lowercase, exist_ok=_lowercase ) snake_case_ :List[str] = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ :Optional[Any] = 42 snake_case_ :List[Any] = 43 with open(_lowercase, """w""", encoding="""utf-8""" ) as vocab_handle: json.dump(_lowercase, _lowercase ) snake_case_ :Union[str, Any] = WavaVecaPhonemeCTCTokenizer( _lowercase, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=_lowercase, ) snake_case_ :List[Any] = True if 
config.feat_extract_norm == """layer""" else False snake_case_ :Any = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_lowercase, return_attention_mask=_lowercase, ) snake_case_ :str = WavaVecaProcessor(feature_extractor=_lowercase, tokenizer=_lowercase ) processor.save_pretrained(_lowercase ) snake_case_ :Optional[Any] = UniSpeechForCTC(_lowercase ) else: snake_case_ :str = UniSpeechForPreTraining(_lowercase ) if is_finetuned: snake_case_, snake_case_, snake_case_ :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: snake_case_, snake_case_, snake_case_ :Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) snake_case_ :Any = model[0].eval() recursively_load_weights(_lowercase, _lowercase, _lowercase ) hf_unispeech.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __a = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
66
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Any ) -> str: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]: # configuration for running training on smdistributed Model Parallel snake_case_ :Tuple = { """enabled""": True, """processes_per_host""": 8, } snake_case_ :List[Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": 
smp_options}, """mpi""": mpi_options} snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]: # create estimator snake_case_ :List[Any] = self.create_estimator(snake_case ) # run training estimator.fit() # result dataframe snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": 
train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: str , snake_case: Dict , snake_case: List[Any]=3 , snake_case: Any=32 , snake_case: Optional[int]=3 , snake_case: List[Any]=10 , snake_case: List[str]=[10, 20, 30, 40] , snake_case: Dict=[1, 1, 2, 1] , snake_case: Optional[int]=True , snake_case: Dict=True , snake_case: Union[str, Any]="relu" , snake_case: List[Any]=3 , snake_case: Dict=None , ) -> Dict: snake_case_ :str = parent snake_case_ :List[Any] = batch_size snake_case_ :int = image_size snake_case_ :Dict = num_channels snake_case_ :Any = embeddings_size snake_case_ :str = hidden_sizes snake_case_ :Tuple = depths snake_case_ :str = is_training snake_case_ :int = use_labels snake_case_ :Optional[int] = hidden_act snake_case_ :Dict = num_labels snake_case_ :Tuple = scope snake_case_ :List[Any] = len(snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: snake_case_ :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :Any = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ :Union[str, Any] = self.get_config() return config, 
pixel_values, labels def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Any , snake_case: Optional[int] ) -> Tuple: snake_case_ :Dict = TFResNetModel(config=snake_case ) snake_case_ :Dict = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase_ ( self: str , snake_case: List[str] , snake_case: Union[str, Any] , snake_case: Any ) -> int: snake_case_ :Any = self.num_labels snake_case_ :Optional[int] = TFResNetForImageClassification(snake_case ) snake_case_ :List[Any] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self: Tuple ) -> Union[str, Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :Tuple = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _A : List[str] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _A : List[str] = False _A : Any = False _A : int = False _A : List[Any] = False _A : Any = False def lowerCAmelCase_ ( self: str ) -> Optional[Any]: snake_case_ :List[str] = TFResNetModelTester(self ) snake_case_ :List[Any] = ConfigTester(self , 
config_class=snake_case , has_text_modality=snake_case ) def lowerCAmelCase_ ( self: int ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: List[str] ) -> List[str]: return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: pass def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(snake_case ) snake_case_ :Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :Optional[int] = [*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[Any] ) -> List[str]: snake_case_ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: str ) -> List[Any]: def check_hidden_states_output(snake_case: List[str] , snake_case: List[str] , snake_case: str ): snake_case_ :Optional[Any] = model_class(snake_case ) snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ :List[str] = self.model_tester.num_stages 
self.assertEqual(len(snake_case ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_, snake_case_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case_ :Any = layer_type snake_case_ :List[Any] = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :str = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]: snake_case_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :int = TFResNetModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def A_ ( ): '''simple docstring''' snake_case_ :Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]: snake_case_ :Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case_ :int = self.default_image_processor snake_case_ :Union[str, Any] = prepare_img() snake_case_ :Optional[Any] = image_processor(images=snake_case , 
return_tensors="""tf""" ) # forward pass snake_case_ :List[str] = model(**snake_case ) # verify the logits snake_case_ :Optional[Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1E-4 ) )
66
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict: snake_case_ :Dict = parent snake_case_ :List[Any] = batch_size snake_case_ :Dict = image_size snake_case_ :Dict = patch_size snake_case_ :Tuple = num_channels snake_case_ :List[Any] = embed_dim snake_case_ :List[str] = depths snake_case_ :str = num_heads snake_case_ :Tuple = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :int = qkv_bias snake_case_ :Tuple = hidden_dropout_prob snake_case_ :Optional[Any] = 
attention_probs_dropout_prob snake_case_ :Dict = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Any = use_absolute_embeddings snake_case_ :int = patch_norm snake_case_ :List[Any] = layer_norm_eps snake_case_ :Tuple = initializer_range snake_case_ :str = is_training snake_case_ :int = scope snake_case_ :Tuple = use_labels snake_case_ :Tuple = type_sequence_label_size snake_case_ :str = encoder_stride snake_case_ :List[Any] = out_features snake_case_ :str = out_indices def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: int ) -> Optional[Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any: snake_case_ :Dict = MaskFormerSwinModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ 
:Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case ): snake_case_ :Optional[Any] = ["""stem"""] snake_case_ :str = MaskFormerSwinBackbone(config=snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :str = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} _A : List[str] = False _A : Any = False _A : Dict = False _A : List[Any] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case_ :str = MaskFormerSwinModelTester(self ) snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ 
`nn.DataParallel`""" ) ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Any ) -> Tuple: return def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: str ) -> List[str]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = 
[*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str: snake_case_ :List[str] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :Any = outputs.hidden_states snake_case_ :Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swin has a different seq_length snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[Any] = True 
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = 3 snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Any = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: List[str] ) -> str: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: str ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case: str ): snake_case_ :Optional[int] = 0 return t def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ): with torch.no_grad(): snake_case_ 
:List[Any] = model(**snake_case , return_dict=snake_case , **snake_case ) snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple() def recursive_check(snake_case: List[Any] , snake_case: int ): if isinstance(snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ): recursive_check(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case , snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has""" f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}.""" ) , ) recursive_check(snake_case , snake_case ) for model_class in self.all_model_classes: snake_case_ :int = model_class(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , 
return_labels=snake_case ) snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ): '''simple docstring''' _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else () _A : Tuple = MaskFormerSwinConfig def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ :List[str] = backbone_class(snake_case ) backbone.to(snake_case ) backbone.eval() snake_case_ :List[Any] = backbone(**snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if 
self.has_attentions: snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case ) self.assertIsNotNone(outputs.attentions )
66
1
"""simple docstring""" import os def A_ ( ): '''simple docstring''' with open(os.path.dirname(_lowercase ) + """/p022_names.txt""" ) as file: snake_case_ :Optional[Any] = str(file.readlines()[0] ) snake_case_ :str = names.replace("""\"""", """""" ).split(""",""" ) names.sort() snake_case_ :Dict = 0 snake_case_ :Union[str, Any] = 0 for i, name in enumerate(_lowercase ): for letter in name: name_score += ord(_lowercase ) - 64 total_score += (i + 1) * name_score snake_case_ :Dict = 0 return total_score if __name__ == "__main__": print(solution())
66
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __a = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> Tuple: snake_case_ :List[str] = 4 snake_case_ :Tuple = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: List[str] ) -> Dict: return (3, 32, 32) @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (3, 32, 32) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } snake_case_ :Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[str] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> str: snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 4 snake_case_ :int = (32, 32) snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": 
time_step} @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (4, 32, 32) @property def lowerCAmelCase_ ( self: List[Any] ) -> int: return (4, 32, 32) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: snake_case_ :Dict = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } snake_case_ :List[str] = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :List[str] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model.to(snake_case ) snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: str ) -> Any: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model_accelerate.to(snake_case ) model_accelerate.eval() snake_case_ :List[Any] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , 
model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case ) snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() snake_case_, snake_case_ :str = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case ) model_normal_load.to(snake_case ) model_normal_load.eval() snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""] assert torch_all_close(snake_case , snake_case , rtol=1E-3 ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(snake_case ) snake_case_ :Optional[int] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case ) with torch.no_grad(): snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) ) class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : List[Any] = """sample""" @property def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple: snake_case_ :Union[str, Any] = 4 snake_case_ :Any = 3 snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes 
).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: return (3, 32, 32) @property def lowerCAmelCase_ ( self: int ) -> Tuple: return (3, 32, 32) def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case_ :List[Any] = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } snake_case_ :int = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :Any = self.dummy_input snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case ) snake_case_ :int = noise snake_case_ :int = model(**snake_case ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self: str ) -> Dict: snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(snake_case ) snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 3 snake_case_ :List[str] = (256, 256) snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :Dict = model(snake_case , snake_case ).sample snake_case_ 
:int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(snake_case ) snake_case_ :Optional[int] = 4 snake_case_ :Optional[Any] = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :str = model(snake_case , snake_case ).sample snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: # not required for this model pass
66
1
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __a = 1.054_571_817e-34 # unit of ℏ : J * s __a = 3e8 # unit of c : m * s^-1 def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: snake_case_ :Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: snake_case_ :List[Any] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: snake_case_ :int = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : int = """visual_bert""" def __init__( self: List[Any] , snake_case: Union[str, Any]=30_522 , snake_case: Dict=768 , snake_case: Any=512 , snake_case: Any=12 , snake_case: Any=12 , snake_case: List[Any]=3_072 , snake_case: int="gelu" , snake_case: int=0.1 , snake_case: str=0.1 , snake_case: str=512 , snake_case: Dict=2 , snake_case: int=0.0_2 , snake_case: Optional[int]=1E-12 , snake_case: str=False , snake_case: List[Any]=True , snake_case: Union[str, Any]=1 , snake_case: Optional[Any]=0 , snake_case: Tuple=2 , **snake_case: Union[str, Any] , ) -> Union[str, Any]: super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) snake_case_ 
:Optional[int] = vocab_size snake_case_ :Optional[int] = max_position_embeddings snake_case_ :Union[str, Any] = hidden_size snake_case_ :Optional[int] = visual_embedding_dim snake_case_ :int = num_hidden_layers snake_case_ :Optional[int] = num_attention_heads snake_case_ :Optional[int] = intermediate_size snake_case_ :str = hidden_act snake_case_ :Optional[Any] = hidden_dropout_prob snake_case_ :str = attention_probs_dropout_prob snake_case_ :List[Any] = initializer_range snake_case_ :Optional[Any] = type_vocab_size snake_case_ :Tuple = layer_norm_eps snake_case_ :Optional[Any] = bypass_transformer snake_case_ :List[str] = special_visual_initialize
66
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = StableDiffusionSAGPipeline _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : List[str] = False def lowerCAmelCase_ ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) snake_case_ :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ :Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) snake_case_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_000 , ) snake_case_ :Tuple = CLIPTextModel(snake_case ) snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ :Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str: if str(snake_case ).startswith("""mps""" ): snake_case_ :Tuple = torch.manual_seed(snake_case ) else: snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ :Any = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self: Optional[int] ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Union[str, Any] = """.""" snake_case_ :str = torch.manual_seed(0 ) snake_case_ :str = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :List[Any] = output.images snake_case_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :Optional[int] = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Union[str, Any] = torch.manual_seed(0 ) snake_case_ :Tuple = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :Optional[int] = output.images snake_case_ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Optional[int] = torch.manual_seed(0 ) snake_case_ :List[str] = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) snake_case_ :Optional[Any] = output.images assert image.shape == (1, 512, 768, 3)
66
1
"""simple docstring""" import math import tensorflow as tf from packaging import version def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = tf.convert_to_tensor(_lowercase ) snake_case_ :Optional[int] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ), x.dtype ) )) return x * cdf def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Dict = tf.convert_to_tensor(_lowercase ) snake_case_ :str = tf.cast(math.pi, x.dtype ) snake_case_ :Union[str, Any] = tf.cast(0.04_4715, x.dtype ) snake_case_ :Dict = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowercase, 3 )) )) return x * cdf def A_ ( _lowercase ): '''simple docstring''' snake_case_ :str = tf.convert_to_tensor(_lowercase ) return x * tf.tanh(tf.math.softplus(_lowercase ) ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[str] = tf.convert_to_tensor(_lowercase ) snake_case_ :str = tf.cast(0.04_4715, x.dtype ) snake_case_ :List[str] = tf.cast(0.79_7884_5608, x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :str = tf.convert_to_tensor(_lowercase ) snake_case_ :Optional[Any] = tf.cast(1.702, x.dtype ) return x * tf.math.sigmoid(coeff * x ) def A_ ( _lowercase ): '''simple docstring''' return tf.clip_by_value(_gelu(_lowercase ), -10, 10 ) def A_ ( _lowercase, _lowercase=-1 ): '''simple docstring''' snake_case_, snake_case_ :List[Any] = tf.split(_lowercase, 2, axis=_lowercase ) return a * tf.math.sigmoid(_lowercase ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def A_ ( _lowercase ): '''simple docstring''' return tf.keras.activations.gelu(_lowercase, approximate=_lowercase ) __a = tf.keras.activations.gelu __a = approximate_gelu_wrap else: __a = _gelu __a = _gelu_new __a = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": 
tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def A_ ( _lowercase ): '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
66
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCamelCase : '''simple docstring''' def __init__( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {} def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = {} def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None: if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ :Dict = probability def lowerCAmelCase_ ( self: List[Any] ) -> list[str]: return list(self.connections ) def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str: snake_case_ :Optional[Any] = 0 snake_case_ :List[str] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase, _lowercase, _lowercase ) snake_case_ :int = Counter(graph.get_nodes() ) snake_case_ :Optional[Any] = start for _ in range(_lowercase ): snake_case_ :Tuple = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" def A_ ( _lowercase = 100 ): '''simple docstring''' snake_case_ :Dict = (n * (n + 1) // 2) ** 2 snake_case_ :List[str] = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F"""{solution() = }""")
66
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __a = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", 
"MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", 
_lowercase ) return [m.group(0 ) for m in matches] def A_ ( ): '''simple docstring''' snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ :Dict = { config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case_ :Optional[Any] = collections.defaultdict(_lowercase ) snake_case_ :int = collections.defaultdict(_lowercase ) snake_case_ :List[str] = collections.defaultdict(_lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(_lowercase ): snake_case_ :int = None if _re_tf_models.match(_lowercase ) is not None: snake_case_ :int = tf_models snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0] elif _re_flax_models.match(_lowercase ) is not None: snake_case_ :List[Any] = flax_models snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0] elif _re_pt_models.match(_lowercase ) is not None: snake_case_ :Optional[Any] = pt_models snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0] if lookup_dict is not None: while len(_lowercase ) > 0: if attr_name in model_prefix_to_model_type: snake_case_ :Optional[int] = True break # Try again after removing the last word in the name snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] ) snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case_ :Optional[Any] = list(_lowercase ) all_models.sort() snake_case_ :Optional[int] = {"""model_type""": all_models} snake_case_ :Optional[int] = [pt_models[t] for t in all_models] snake_case_ :Any = [tf_models[t] for t in all_models] snake_case_ :Dict = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case_ :Dict = {} for t in all_models: if t in 
transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case_ :Optional[Any] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case_ :Tuple = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case_ :Tuple = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. snake_case_ :str = """AutoTokenizer""" snake_case_ :int = [processors[t] for t in all_models] return pd.DataFrame(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ): # The type of pipeline may not exist in this framework if not hasattr(_lowercase, _lowercase ): continue # First extract all model_names snake_case_ :Tuple = [] for name in getattr(_lowercase, _lowercase ).values(): if isinstance(_lowercase, _lowercase ): model_names.append(_lowercase ) else: model_names.extend(list(_lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = get_frameworks_table() snake_case_ :str = Dataset.from_pandas(_lowercase ) snake_case_ :List[Any] = hf_hub_download( """huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase ) 
snake_case_ :List[str] = Dataset.from_json(_lowercase ) snake_case_ :int = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(_lowercase ) ) } snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case_ :Tuple = sorted(table.keys() ) snake_case_ :Tuple = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case_ :Union[str, Any] = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: snake_case_ :List[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, ) def A_ ( ): '''simple docstring''' snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS snake_case_ :List[str] = [] for key in pipeline_tasks: if key not in in_table: snake_case_ :int = pipeline_tasks[key]["""pt"""] if isinstance(_lowercase, (list, tuple) ): snake_case_ :Any = model[0] snake_case_ :str = model.__name__ if model not in in_table.values(): missing.append(_lowercase ) if len(_lowercase ) > 0: snake_case_ :Optional[int] = """, """.join(_lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f"""`utils/update_metadata.py`: 
{msg}. Please add them!""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __a = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
66
1
"""simple docstring""" def A_ ( ): '''simple docstring''' snake_case_ :int = [] snake_case_ :int = 1 while len(_lowercase ) < 1e6: constant.append(str(_lowercase ) ) i += 1 snake_case_ :str = """""".join(_lowercase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[99999] ) * int(constant[999999] ) ) if __name__ == "__main__": print(solution())
66
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __a = logging.getLogger(__name__) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Union[str, Any] = """token-classification""" def __init__( self: Any , snake_case: Tuple ) -> List[Any]: if type(snake_case ) == dict: snake_case_ :Optional[int] = Namespace(**snake_case ) snake_case_ :Optional[int] = import_module("""tasks""" ) try: snake_case_ :Any = getattr(snake_case , hparams.task_type ) snake_case_ :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels ) snake_case_ :str = CrossEntropyLoss().ignore_index super().__init__(snake_case , len(self.labels ) , self.mode ) def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any: return self.model(**snake_case ) def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]: snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :List[str] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Optional[Any] = self(**snake_case ) snake_case_ :List[str] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_ :List[Any] = self.hparams for mode in ["train", "dev", "test"]: snake_case_ :Optional[int] = self._feature_file(snake_case ) if os.path.exists(snake_case ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :Optional[int] = torch.load(snake_case ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case ) snake_case_ :Any = self.token_classification_task.convert_examples_to_features( snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , 
pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , snake_case ) torch.save(snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader: snake_case_ :int = self._feature_file(snake_case ) logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :str = torch.load(snake_case ) snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]: """Compute validation""" "" snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :Dict = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Dict = self(**snake_case ) snake_case_, snake_case_ :Dict = outputs[:2] snake_case_ :Union[str, Any] = logits.detach().cpu().numpy() snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple: snake_case_ :Union[str, 
Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean() snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) snake_case_ :Tuple = np.argmax(snake_case , axis=2 ) snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) snake_case_ :Optional[Any] = dict(enumerate(self.labels ) ) snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case_ :str = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(snake_case , snake_case ), """precision""": precision_score(snake_case , snake_case ), """recall""": recall_score(snake_case , snake_case ), """f1""": fa_score(snake_case , snake_case ), } snake_case_ :List[Any] = dict(results.items() ) snake_case_ :Union[str, Any] = results return ret, preds_list, out_label_list def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]: # when stable snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case ) snake_case_ :str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any: # updating to test_epoch_end instead of deprecated test_end snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case_ :Optional[int] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase_ ( 
snake_case: Any , snake_case: int ) -> Dict: # Add NER specific options BaseTransformer.add_model_specific_args(snake_case , snake_case ) parser.add_argument( """--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=snake_case , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __a = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __a = NERTransformer.add_model_specific_args(parser, os.getcwd()) __a = parser.parse_args() __a = NERTransformer(args) __a = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __a = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
66
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_ :Union[str, Any] = tempfile.mkdtemp() snake_case_ :Any = SamImageProcessor() snake_case_ :Tuple = SamProcessor(snake_case ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self: Any , **snake_case: Optional[Any] ) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor def lowerCAmelCase_ ( self: Dict ) -> int: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self: List[Any] ) -> Tuple: snake_case_ :Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ :List[str] = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]: snake_case_ :Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ :Optional[int] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) snake_case_ :Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> List[str]: snake_case_ :Dict = 
self.get_image_processor() snake_case_ :List[Any] = SamProcessor(image_processor=snake_case ) snake_case_ :int = self.prepare_image_inputs() snake_case_ :Dict = image_processor(snake_case , return_tensors="""np""" ) snake_case_ :Optional[Any] = processor(images=snake_case , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: snake_case_ :str = self.get_image_processor() snake_case_ :str = SamProcessor(image_processor=snake_case ) snake_case_ :Dict = [torch.ones((1, 3, 5, 5) )] snake_case_ :int = [[1_764, 2_646]] snake_case_ :Optional[Any] = [[683, 1_024]] snake_case_ :Any = processor.post_process_masks(snake_case , snake_case , snake_case ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) snake_case_ :Dict = processor.post_process_masks( snake_case , torch.tensor(snake_case ) , torch.tensor(snake_case ) ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) # should also work with np snake_case_ :str = [np.ones((1, 3, 5, 5) )] snake_case_ :Union[str, Any] = processor.post_process_masks(snake_case , np.array(snake_case ) , np.array(snake_case ) ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) snake_case_ :List[str] = [[1, 0], [0, 1]] with self.assertRaises(snake_case ): snake_case_ :int = processor.post_process_masks(snake_case , np.array(snake_case ) , np.array(snake_case ) ) @require_vision @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case_ :Dict = tempfile.mkdtemp() snake_case_ :Dict = SamImageProcessor() snake_case_ :int = SamProcessor(snake_case ) 
processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self: Optional[Any] , **snake_case: Tuple ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor def lowerCAmelCase_ ( self: str ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_ :List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ :Optional[Any] = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self: List[Any] ) -> int: snake_case_ :Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ :Optional[int] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) snake_case_ :Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_ :Any = self.get_image_processor() snake_case_ :int = SamProcessor(image_processor=snake_case ) snake_case_ :List[Any] = self.prepare_image_inputs() snake_case_ :Optional[Any] = image_processor(snake_case , return_tensors="""np""" ) snake_case_ :List[str] = processor(images=snake_case , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_ :Dict = self.get_image_processor() snake_case_ :Any = 
SamProcessor(image_processor=snake_case ) snake_case_ :Optional[int] = [tf.ones((1, 3, 5, 5) )] snake_case_ :Dict = [[1_764, 2_646]] snake_case_ :Dict = [[683, 1_024]] snake_case_ :Union[str, Any] = processor.post_process_masks(snake_case , snake_case , snake_case , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) snake_case_ :List[str] = processor.post_process_masks( snake_case , tf.convert_to_tensor(snake_case ) , tf.convert_to_tensor(snake_case ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) # should also work with np snake_case_ :Dict = [np.ones((1, 3, 5, 5) )] snake_case_ :str = processor.post_process_masks( snake_case , np.array(snake_case ) , np.array(snake_case ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) ) snake_case_ :Optional[Any] = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): snake_case_ :List[str] = processor.post_process_masks( snake_case , np.array(snake_case ) , np.array(snake_case ) , return_tensors="""tf""" ) @require_vision @require_torchvision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case_ :int = tempfile.mkdtemp() snake_case_ :str = SamImageProcessor() snake_case_ :Optional[int] = SamProcessor(snake_case ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self: str , **snake_case: int ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor def lowerCAmelCase_ ( self: str ) -> List[str]: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case_ :Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ :str = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple: snake_case_ :int = 
self.get_image_processor() snake_case_ :Optional[Any] = SamProcessor(image_processor=snake_case ) snake_case_ :Optional[int] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) snake_case_ :Optional[int] = [tf.convert_to_tensor(snake_case )] snake_case_ :Optional[int] = [torch.tensor(snake_case )] snake_case_ :Dict = [[1_764, 2_646]] snake_case_ :Optional[Any] = [[683, 1_024]] snake_case_ :List[str] = processor.post_process_masks( snake_case , snake_case , snake_case , return_tensors="""tf""" ) snake_case_ :Union[str, Any] = processor.post_process_masks( snake_case , snake_case , snake_case , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def lowerCAmelCase_ ( self: List[str] ) -> int: snake_case_ :Optional[Any] = self.get_image_processor() snake_case_ :Any = SamProcessor(image_processor=snake_case ) snake_case_ :Union[str, Any] = self.prepare_image_inputs() snake_case_ :Optional[Any] = image_processor(snake_case , return_tensors="""pt""" )["""pixel_values"""].numpy() snake_case_ :Dict = processor(images=snake_case , return_tensors="""pt""" )["""pixel_values"""].numpy() snake_case_ :int = image_processor(snake_case , return_tensors="""tf""" )["""pixel_values"""].numpy() snake_case_ :Optional[Any] = processor(images=snake_case , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(snake_case , snake_case ) ) self.assertTrue(np.allclose(snake_case , snake_case ) ) self.assertTrue(np.allclose(snake_case , snake_case ) )
66
"""simple docstring""" from math import factorial class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple: snake_case_ :List[Any] = real if isinstance(snake_case , snake_case ): snake_case_ :Tuple = [1] * rank else: snake_case_ :Optional[Any] = rank def __repr__( self: List[str] ) -> Tuple: return ( f"""{self.real}+""" f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: snake_case_ :Any = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case ) def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]: if not isinstance(snake_case , snake_case ): return Dual(self.real + other , self.duals ) snake_case_ :List[Any] = self.duals.copy() snake_case_ :Tuple = other.duals.copy() if len(snake_case ) > len(snake_case ): o_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) elif len(snake_case ) < len(snake_case ): s_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) snake_case_ :Dict = [] for i in range(len(snake_case ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case ) _A : str = __add__ def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple: return self + other * -1 def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]: if not isinstance(snake_case , snake_case ): snake_case_ :Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case ) snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case ) _A : int = 
__mul__ def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case ) raise ValueError def __floordiv__( self: int , snake_case: List[Any] ) -> Any: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[int] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case ) raise ValueError def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]: if n < 0 or isinstance(snake_case , snake_case ): raise ValueError("""power must be a positive integer""" ) if n == 0: return 1 if n == 1: return self snake_case_ :str = self for _ in range(n - 1 ): x *= self return x def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if not callable(_lowercase ): raise ValueError("""differentiate() requires a function as input for func""" ) if not isinstance(_lowercase, (float, int) ): raise ValueError("""differentiate() requires a float as input for position""" ) if not isinstance(_lowercase, _lowercase ): raise ValueError("""differentiate() requires an int as input for order""" ) snake_case_ :Optional[Any] = Dual(_lowercase, 1 ) snake_case_ :List[Any] = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def A_ ( _lowercase ): '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
66
1
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(_lowercase ) * abs(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
66
"""simple docstring""" from __future__ import annotations __a = 10 def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = 1 snake_case_ :List[str] = max(_lowercase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ :list[list] = [[] for _ in range(_lowercase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ :Any = int((i / placement) % RADIX ) buckets[tmp].append(_lowercase ) # put each buckets' contents into list_of_ints snake_case_ :Optional[Any] = 0 for b in range(_lowercase ): for i in buckets[b]: snake_case_ :Union[str, Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __a = logging.get_logger(__name__) __a = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Any = """t5""" _A : int = ["""past_key_values"""] _A : List[str] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self: List[Any] , snake_case: Tuple=32_128 , snake_case: Tuple=512 , snake_case: int=64 , snake_case: List[Any]=2_048 , snake_case: str=6 , snake_case: List[Any]=None , snake_case: List[Any]=8 , snake_case: int=32 , snake_case: List[str]=128 , snake_case: str=0.1 , snake_case: Union[str, Any]=1E-6 , snake_case: str=1.0 , snake_case: Union[str, Any]="relu" , snake_case: List[str]=True , snake_case: List[str]=True , snake_case: Tuple=0 , snake_case: Tuple=1 , **snake_case: List[str] , ) -> int: snake_case_ :Optional[int] = vocab_size snake_case_ :str = d_model snake_case_ :str = d_kv snake_case_ :Tuple = d_ff snake_case_ :str = num_layers snake_case_ :List[Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry snake_case_ :List[str] = num_heads snake_case_ :List[Any] = relative_attention_num_buckets snake_case_ :Optional[int] = relative_attention_max_distance snake_case_ :List[str] = dropout_rate snake_case_ :Tuple = layer_norm_epsilon snake_case_ :List[str] = initializer_factor snake_case_ :Optional[int] = feed_forward_proj snake_case_ :str = use_cache snake_case_ :str = 
self.feed_forward_proj.split("""-""" ) snake_case_ :Optional[Any] = act_info[-1] snake_case_ :Union[str, Any] = act_info[0] == """gated""" if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": snake_case_ :Optional[Any] = """gelu_new""" super().__init__( pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , **snake_case , ) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]: snake_case_ :str = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: snake_case_ :Dict = """past_encoder_sequence + sequence""" snake_case_ :Dict = {0: """batch"""} snake_case_ :Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: snake_case_ :int = {0: """batch""", 1: """decoder_sequence"""} snake_case_ :Optional[int] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(snake_case , direction="""inputs""" ) return common_inputs @property def lowerCAmelCase_ ( self: str ) -> int: return 13
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Project Euler problem 44: find the smallest difference of a pair of
pentagonal numbers whose sum and difference are both pentagonal."""


def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    Inverts P(k) = k(3k - 1) / 2: n is pentagonal iff (1 + sqrt(1 + 24n)) / 6
    is a whole number.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Search pairs among the first ``limit - 1`` pentagonal numbers.

    Returns the difference of the first pair (Pi, Pj) found whose sum and
    difference are both pentagonal, or -1 if no such pair exists in range.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        # j starts at i so each unordered pair is checked once.
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"""{solution() = }""")
66
"""Slow integration tests for the Flax Stable Diffusion ControlNet pipeline."""
import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class lowerCamelCase(unittest.TestCase):
    """End-to-end checks for canny- and openpose-conditioned generation."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        """Generate from a canny edge map and compare a pixel slice."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        # Replicate params and shard inputs across devices for pmapped inference.
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        """Generate from an openpose skeleton image and compare a pixel slice."""
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
66
1
"""Convert distilled DeiT checkpoints from the timm library to HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_name, hf_name) pairs for every weight that is only renamed."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF design, verify, and save."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # model names look like e.g. "deit_base_distilled_patch16_224"
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
66
"""Lazy import structure for the MobileBERT model (config, tokenizers, torch and TF models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Mapping of submodule name -> public names; optional entries are added below
# only when the corresponding backend can actually be imported.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for type checkers only; at runtime the lazy module below
    # resolves names on first access instead.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Register the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Convert DPT (hybrid) checkpoints from the original repository to HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a DPTConfig (and the expected output shape) from the checkpoint URL.

    NOTE(review): config attribute names below are reconstructed from the value
    sequence of the original script — verify against ``DPTConfig``.
    """
    config = DPTConfig(embedding_type="hybrid")
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # Fixed: the original condition was `if "nyu" or "midas" in checkpoint_url:`,
    # which is always true because the literal "nyu" is truthy.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop timm head weights that have no HF counterpart (in place)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map an original DPT state-dict key to its HuggingFace name."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    """Split the fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak an original DPT checkpoint into the HF design, verify, and save."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
66
"""Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict."""
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    """Read every variable from the TF checkpoint in ``args.tf_model_dir``,
    map it to the corresponding PyTorch parameter name, and save the resulting
    state dict to ``args.output`` (a ``.pt`` suffix is appended if missing).
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."""
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            # NOTE(review): dtype reconstructed as float16 — confirm against the
            # original conversion script.
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # optimizer slots are not part of the model
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = vnp[i].transpose([1, 0]).copy()  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # token embeddings are tied to the LM head
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
66
1
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING _A : int = TF_MODEL_FOR_MASKED_LM_MAPPING def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case_ :Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) snake_case_ :str = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) snake_case_ :Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": 
"""My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: snake_case_ :List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) snake_case_ :Optional[int] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) snake_case_ :Union[str, Any] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) snake_case_ :str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) snake_case_ :List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(snake_case , decimals=6 ) , [ [ { 
"""score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def lowerCAmelCase_ ( self: Any ) -> str: snake_case_ :Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() snake_case_ :List[str] = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(snake_case , snake_case ) @slow @require_torch def lowerCAmelCase_ ( self: Dict ) -> Dict: snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(snake_case ) @slow @require_tf def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_ :Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[Any] ) -> Union[str, Any]: snake_case_ :Optional[Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(snake_case ) , [ {"""sequence""": """My name is John""", """score""": 0.0_0_8, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.0_0_7, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) snake_case_ :List[Any] = unmasker("""The largest city in France is <mask>""" ) 
self.assertEqual( nested_simplify(snake_case ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.2_5_1, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.2_1_4, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) snake_case_ :int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(snake_case ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.0_0_5, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.0_0_0, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.0_0_0, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_ :str = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) snake_case_ :Any = None snake_case_ :Tuple = None self.run_pipeline_test(snake_case , [] ) @require_tf def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: snake_case_ :int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) snake_case_ :List[str] = None snake_case_ :List[Any] = None self.run_pipeline_test(snake_case , [] ) def lowerCAmelCase_ ( self: List[Any] , snake_case: int , snake_case: Tuple , snake_case: Optional[int] ) -> Any: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :str = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ 
:Any = fill_masker.tokenizer snake_case_ :List[Any] = fill_masker.model snake_case_ :int = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Optional[int] = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Union[str, Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( snake_case , [ [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": 
ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], ] , ) with self.assertRaises(snake_case ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(snake_case ): fill_masker("""This is""" ) self.run_test_top_k(snake_case , snake_case ) self.run_test_targets(snake_case , snake_case ) self.run_test_top_k_targets(snake_case , snake_case ) self.fill_mask_with_duplicate_targets_and_top_k(snake_case , snake_case ) self.fill_mask_with_multiple_masks(snake_case , snake_case ) def lowerCAmelCase_ ( self: Any , snake_case: int , snake_case: int ) -> int: snake_case_ :List[str] = tokenizer.get_vocab() snake_case_ :Dict = sorted(vocab.keys() )[:2] # Pipeline argument snake_case_ :Any = FillMaskPipeline(model=snake_case , tokenizer=snake_case , targets=snake_case ) snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( 
snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :List[str] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , snake_case ) snake_case_ :Optional[int] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) ) # Call argument snake_case_ :int = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Tuple = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , snake_case ) snake_case_ :Tuple = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case ) ) # Score equivalence snake_case_ :Tuple = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) snake_case_ :Any = [top_mask["""token_str"""] for top_mask in outputs] snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(snake_case ) == set(snake_case ): snake_case_ :Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case ) snake_case_ :Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) ) # Raises with invalid with self.assertRaises(snake_case ): snake_case_ :Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(snake_case ): snake_case_ :int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] ) with self.assertRaises(snake_case ): snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: List[str] ) -> Union[str, Any]: snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case , top_k=2 ) snake_case_ :str = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) snake_case_ :Optional[int] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Optional[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( snake_case , [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ] , ) self.assertEqual(nested_simplify(snake_case ) , 
nested_simplify(snake_case ) ) def lowerCAmelCase_ ( self: Any , snake_case: List[str] , snake_case: List[Any] ) -> Tuple: snake_case_ :Tuple = tokenizer.get_vocab() snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) # top_k=2, ntargets=3 snake_case_ :List[Any] = sorted(vocab.keys() )[:3] snake_case_ :Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case ) # If we use the most probably targets, and filter differently, we should still # have the same results snake_case_ :str = [el["""token_str"""] for el in sorted(snake_case , key=lambda snake_case : x["score"] , reverse=snake_case )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(snake_case ).issubset(snake_case ): snake_case_ :Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case ) # They should yield exactly the same result self.assertEqual(nested_simplify(snake_case ) , nested_simplify(snake_case ) ) def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] , snake_case: Dict ) -> Tuple: snake_case_ :List[str] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Dict = tokenizer.get_vocab() # String duplicates + id duplicates snake_case_ :Optional[Any] = sorted(vocab.keys() )[:3] snake_case_ :Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case_ :str = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(snake_case ) , 3 ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict ) -> int: snake_case_ :Union[str, Any] = FillMaskPipeline(model=snake_case , tokenizer=snake_case ) snake_case_ :Dict = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( 
snake_case , [ [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], [ {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, {"""sequence""": ANY(snake_case ), """score""": ANY(snake_case ), """token""": ANY(snake_case ), """token_str""": ANY(snake_case )}, ], ] , )
66
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __a = pd.read_csv("sample_data.csv", header=None) __a = df.shape[:1][0] # If you're using some other dataset input the target column __a = df.iloc[:, 1:2] __a = actual_data.values.reshape(len_data, 1) __a = MinMaxScaler().fit_transform(actual_data) __a = 10 __a = 5 __a = 20 __a = len_data - periods * look_back __a = actual_data[:division] __a = actual_data[division - look_back :] __a , __a = [], [] __a , __a = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __a = np.array(train_x) __a = np.array(test_x) __a = np.array([list(i.ravel()) for i in train_y]) __a = np.array([list(i.ravel()) for i in test_y]) __a = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") __a = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) __a = model.predict(x_test)
66
1
"""simple docstring""" import re from filelock import FileLock try: import nltk __a = True except (ImportError, ModuleNotFoundError): __a = False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) def A_ ( _lowercase ): '''simple docstring''' re.sub("""<n>""", """""", _lowercase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(_lowercase ) )
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a = { "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"], "processing_git": ["GitProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = XCLIPTextConfig() # derive patch size from model name snake_case_ :Union[str, Any] = model_name.find("""patch""" ) snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase ) if "large" in model_name: snake_case_ :Optional[Any] = 768 snake_case_ :Union[str, Any] = 3072 snake_case_ :Any = 12 snake_case_ :Any = 1024 snake_case_ :str = 4096 snake_case_ :Union[str, Any] = 16 snake_case_ :Union[str, Any] = 24 snake_case_ :Tuple = 768 snake_case_ :Any = 3072 if model_name == "xclip-large-patch14-16-frames": snake_case_ :Any = 336 snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase ) if "large" in model_name: snake_case_ :List[Any] = 768 return config def A_ ( _lowercase ): '''simple docstring''' if name == "token_embedding.weight": snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: snake_case_ :str = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: snake_case_ :int = name.replace("""c_proj""", """fc2""" ) if name.startswith("""transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", 
"""text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" ) if "ln_final" in name: snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" ) if "visual.conv1" in name: snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" ) if "visual.proj" in name: snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" ) if "text_projection" in name: snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" ) if "prompts_visual_ln" in name: snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": snake_case_ :str = name.replace("""positional""", """position""" ) if name.startswith("""mit.resblocks""" ): snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): 
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" ) return name def A_ ( _lowercase, _lowercase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ :Dict = orig_state_dict.pop(_lowercase ) if "attn.in_proj" in key: snake_case_ :Optional[Any] = key.split(""".""" ) if key.startswith("""visual""" ): snake_case_ :Any = key_split[3] snake_case_ :Optional[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ :str = val[ :dim, : ] snake_case_ :Optional[int] = val[ dim : dim * 2, : ] snake_case_ :Union[str, Any] = val[ -dim:, : ] else: snake_case_ :Dict = val[ :dim ] snake_case_ :Optional[int] = val[ dim : dim * 2 ] snake_case_ :Optional[int] = val[ -dim: ] else: if "weight" in key: snake_case_ :Optional[Any] = val[ :dim, : ] snake_case_ :List[str] = val[ dim : dim * 2, : ] snake_case_ :Dict = val[ -dim:, : ] else: snake_case_ :Union[str, Any] = val[:dim] snake_case_ :Union[str, Any] = val[ dim : dim * 2 ] snake_case_ :Union[str, Any] = val[-dim:] elif key.startswith("""mit""" ): snake_case_ :Tuple = key_split[2] snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ :Optional[int] = val[:dim, :] snake_case_ :Optional[int] = val[dim : dim * 2, :] snake_case_ :str = val[-dim:, :] else: snake_case_ :str = val[:dim] snake_case_ :Any = val[dim : dim * 2] snake_case_ :int = val[-dim:] else: snake_case_ :Tuple = key_split[2] snake_case_ :Any = config.text_config.hidden_size if "weight" in key: snake_case_ :Dict = val[:dim, :] snake_case_ :Dict = val[ dim : dim * 2, : ] snake_case_ :List[str] = val[-dim:, :] else: snake_case_ :Any = val[:dim] snake_case_ :Tuple = val[ dim : dim * 2 ] snake_case_ :List[str] = val[-dim:] else: snake_case_ :Optional[int] = rename_key(_lowercase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ :Optional[Any] = val.T snake_case_ :Tuple = 
val return orig_state_dict def A_ ( _lowercase ): '''simple docstring''' if num_frames == 8: snake_case_ :str = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: snake_case_ :int = """eating_spaghetti.npy""" elif num_frames == 32: snake_case_ :List[str] = """eating_spaghetti_32_frames.npy""" snake_case_ :int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", ) snake_case_ :Union[str, Any] = np.load(_lowercase ) return list(_lowercase ) def A_ ( _lowercase, _lowercase=None, _lowercase=False ): '''simple docstring''' snake_case_ :List[Any] = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": 
"""https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } snake_case_ :Optional[int] = model_to_url[model_name] snake_case_ :int = 8 if "16-frames" in model_name: snake_case_ :List[Any] = 16 elif "shot" in model_name: snake_case_ :Dict = 32 snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase ) snake_case_ :Optional[Any] = XCLIPModel(_lowercase ) model.eval() if "drive" in checkpoint_url: snake_case_ :List[str] = """pytorch_model.bin""" gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase ) snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""] else: snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""] snake_case_ :Union[str, Any] = 
convert_state_dict(_lowercase, _lowercase ) snake_case_ :str = XCLIPModel(_lowercase ) snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase ) snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase ) snake_case_ :Optional[int] = prepare_video(_lowercase ) snake_case_ :Optional[Any] = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase ) print("""Shape of pixel values:""", inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ :List[Any] = model(**_lowercase ) # Verify outputs snake_case_ :List[Any] = outputs.logits_per_video snake_case_ :Any = logits_per_video.softmax(dim=1 ) print("""Probs:""", _lowercase ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] ) elif model_name == "xclip-base-patch16": snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] ) elif model_name == "xclip-large-patch14": snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] ) else: raise ValueError(f"""Model name {model_name} not supported""" ) assert torch.allclose(_lowercase, _lowercase, atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) 
model.push_to_hub(_lowercase, organization="""nielsr""" ) processor.push_to_hub(_lowercase, organization="""nielsr""" ) slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __a = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
66
1
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' return [sentence[i : i + ngram_size] for i in range(len(_lowercase ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
66
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :Any = seq_length snake_case_ :List[str] = is_training snake_case_ :Optional[Any] = use_attention_mask snake_case_ :Dict = use_token_type_ids snake_case_ :Union[str, Any] = use_labels snake_case_ :str = vocab_size snake_case_ :int = hidden_size snake_case_ :List[str] = num_hidden_layers snake_case_ :Dict = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Any = max_position_embeddings snake_case_ :Union[str, Any] = type_vocab_size snake_case_ :Optional[int] = type_sequence_label_size snake_case_ :Union[str, Any] = 
initializer_range snake_case_ :Tuple = num_choices def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_attention_mask: snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ :Any = None if self.use_token_type_ids: snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :str = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :int = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs snake_case_ :Union[str, Any] = True snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple 
docstring''' _A : List[str] = True _A : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = FlaxBertModelTester(self ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Dict: # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" ) snake_case_ :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
66
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = StableDiffusionSAGPipeline _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : List[str] = False def lowerCAmelCase_ ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) snake_case_ :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ :Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) snake_case_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_000 , ) snake_case_ :Tuple = CLIPTextModel(snake_case ) snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ :Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str: if str(snake_case ).startswith("""mps""" ): snake_case_ :Tuple = torch.manual_seed(snake_case ) else: snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ :Any = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self: Optional[int] ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Union[str, Any] = """.""" snake_case_ :str = torch.manual_seed(0 ) snake_case_ :str = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :List[Any] = output.images snake_case_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :Optional[int] = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Union[str, Any] = torch.manual_seed(0 ) snake_case_ :Tuple = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :Optional[int] = output.images snake_case_ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Optional[int] = torch.manual_seed(0 ) snake_case_ :List[str] = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) snake_case_ :Optional[Any] = output.images assert image.shape == (1, 512, 768, 3)
66
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def lowerCAmelCase_ ( self: Tuple , snake_case: list[list[float]] , snake_case: list[int] ) -> int: snake_case_ :Any = 0.0 snake_case_ :Tuple = 0.0 for i in range(len(snake_case ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self: Optional[int] , snake_case: list[list[int | float]] , snake_case: list[int] , snake_case: int , snake_case: float ) -> list[list[int | float]]: for i in range(len(snake_case ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A_ ( ): '''simple docstring''' snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case_ :Optional[Any] = SelfOrganizingMap() snake_case_ :Dict = 3 snake_case_ :Dict = 0.5 for _ in range(_lowercase ): for j in range(len(_lowercase ) ): # training sample snake_case_ :List[Any] = training_samples[j] # Compute the winning vector snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase ) # Update the winning vector snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase ) # classify test sample snake_case_ :str = [0, 0, 0, 1] snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase ) # results print(f"""Clusters that the test sample belongs to : {winner}""" ) print(f"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
66
1
"""simple docstring""" from __future__ import annotations def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Any = 0 snake_case_ :Optional[int] = len(_lowercase ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: snake_case_ :Union[str, Any] = i + 1 else: snake_case_ :int = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ 
:List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 
** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : 
List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ 
:List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1
"""Generation stopping criteria (obfuscated copy of transformers' stopping_criteria).

NOTE(review): this file is obfuscation residue — parameter lists repeat the
name ``snake_case`` (duplicate-argument SyntaxError), the base class
``_lowerCAmelCase`` (originally ``ABC``) is never defined, and many bodies
reference the original, pre-obfuscation local names. It cannot be imported
as-is; the comments below document the intended behavior.
"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


# NOTE(review): this logger binding is immediately shadowed by the docstring
# constant below, so the ``logger.warning_once(...)`` call further down
# references an undefined name ``logger`` — confirm against upstream.
__a = logging.get_logger(__name__)

# Shared "Args / Return" documentation injected into each ``__call__`` below
# via ``add_start_docstrings``.
__a = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"


class lowerCamelCase ( _lowerCAmelCase ):
    """Abstract base: a stopping criterion decides, per generation step,
    whether text generation should halt (originally ``StoppingCriteria``)."""

    @add_start_docstrings(snake_case )
    def __call__( self: int , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: Optional[Any] ) -> bool:
        # Subclasses must implement the actual stop decision.
        raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )


class lowerCamelCase ( _lowerCAmelCase ):
    """Stop once the generated sequence reaches ``max_length`` tokens
    (originally ``MaxLengthCriteria``)."""

    def __init__( self: Any , snake_case: int , snake_case: Optional[int] = None ) -> List[str]:
        # NOTE(review): ``max_length`` / ``max_position_embeddings`` are the
        # pre-obfuscation parameter names and are undefined here.
        snake_case_ :Dict = max_length
        snake_case_ :Union[str, Any] = max_position_embeddings

    @add_start_docstrings(snake_case )
    def __call__( self: Dict , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: str ) -> bool:
        # Current generated length is the last dimension of ``input_ids``.
        snake_case_ :Dict = input_ids.shape[-1]
        snake_case_ :Union[str, Any] = cur_len >= self.max_length
        # Warn (once) when generation is about to exceed the model's
        # position-embedding capacity, even though we are not stopping yet.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                """This is a friendly reminder - the current text generation call will exceed the model's predefined """
                f"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
                """exceptions, performance degradation, or nothing at all.""" )
        return is_done


class lowerCamelCase ( _lowerCAmelCase ):
    """Deprecated: stop after ``max_new_tokens`` tokens beyond ``start_length``
    (originally ``MaxNewTokensCriteria``)."""

    def __init__( self: Union[str, Any] , snake_case: int , snake_case: int ) -> Dict:
        # Emit the deprecation warning at construction time.
        warnings.warn(
            """The class `MaxNewTokensCriteria` is deprecated. """
            f"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            """with `max_length = start_length + max_new_tokens` instead.""" , snake_case , )
        snake_case_ :Union[str, Any] = start_length
        snake_case_ :int = max_new_tokens
        # Effective absolute length bound used by __call__.
        snake_case_ :int = start_length + max_new_tokens

    @add_start_docstrings(snake_case )
    def __call__( self: Optional[int] , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: str ) -> bool:
        return input_ids.shape[-1] >= self.max_length


class lowerCamelCase ( _lowerCAmelCase ):
    """Stop when wall-clock time since ``initial_timestamp`` exceeds
    ``max_time`` seconds (originally ``MaxTimeCriteria``)."""

    def __init__( self: Optional[int] , snake_case: float , snake_case: Optional[float] = None ) -> Union[str, Any]:
        snake_case_ :Tuple = max_time
        # Default the reference timestamp to "now" when none is supplied.
        snake_case_ :List[Any] = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(snake_case )
    def __call__( self: Tuple , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: Union[str, Any] ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class lowerCamelCase ( _lowerCAmelCase ):
    """A list of criteria: generation stops as soon as ANY member fires
    (originally ``StoppingCriteriaList``)."""

    @add_start_docstrings(snake_case )
    def __call__( self: str , snake_case: torch.LongTensor , snake_case: torch.FloatTensor , **snake_case: int ) -> bool:
        return any(criteria(snake_case , snake_case ) for criteria in self )

    @property
    def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]:
        # Return the ``max_length`` of the first length-based criterion in the
        # list, or None when there is none (originally ``max_length``).
        for stopping_criterium in self:
            if isinstance(snake_case , snake_case ):
                return stopping_criterium.max_length
            elif isinstance(snake_case , snake_case ):
                return stopping_criterium.max_length
        return None


def A_ (
_lowercase, _lowercase ):
    """Validate/normalise a StoppingCriteriaList against a ``max_length``
    argument: warn on a mismatch, or append a MaxLengthCriteria when the
    list defines no length bound (originally ``validate_stopping_criteria``).

    NOTE(review): the duplicate ``_lowercase`` parameters and the undefined
    ``stopping_criteria`` / ``stopping_max_length`` / ``max_length`` names
    are obfuscation residue.
    """
    snake_case_ :List[str] = stopping_criteria.max_length
    # Work on a copy so the caller's list is never mutated.
    snake_case_ :List[str] = deepcopy(_lowercase )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""", _lowercase )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowercase ) )
    return new_stopping_criteria
66
"""Validate Sri Lankan mobile phone numbers."""
import re

# Accepted forms: prefix 0 / 94 / +94 / 0094, then "7", a second digit in
# {0,1,2,4,5,6,7,8}, an optional separator ("-" or " "), then 7 digits.
# Compiled once at module level instead of on every call.
_SRI_LANKA_PHONE_RE = re.compile(
    r"^(?:0|94|\+94|0{2}94)"
    r"7(0|1|2|4|5|6|7|8)"
    r"(-| |)"
    r"\d{7}$"
)


def A_(_lowercase):
    """Return True if ``_lowercase`` is a valid Sri Lankan mobile number.

    Bug fix: the original compiled the pattern into an unused local and then
    called ``re.search(_lowercase, _lowercase)`` — i.e. it used the *input
    string* as the regex, which matches almost anything against itself and
    raises ``re.error`` for inputs beginning with "+".
    """
    return bool(_SRI_LANKA_PHONE_RE.search(_lowercase))


if __name__ == "__main__":
    # Bug fix: the original printed ``is_sri_lankan_phone_number(phone)``,
    # both of which were undefined names in this file.
    __a = "0094702343221"
    print(A_(__a))
66
1
"""Project Euler problem 6: sum square difference."""


def A_(_lowercase=100):
    """Return the difference between the square of the sum and the sum of
    the squares of the first ``_lowercase`` natural numbers.

    Args:
        _lowercase: upper bound ``n`` of the range 1..n (default 100).

    Returns:
        int: ``(1 + ... + n)**2 - (1**2 + ... + n**2)``.
    """
    # Bug fix: the original body referenced an undefined name ``n`` instead
    # of the parameter (NameError on any call).  Also use floor division so
    # the closed-form expressions stay exact integers — ``/ 6`` produced
    # floats and can lose precision for large n.
    sum_of_squares = _lowercase * (_lowercase + 1) * (2 * _lowercase + 1) // 6
    square_of_sum = (_lowercase * (_lowercase + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # Bug fix: the original called an undefined ``solution()`` here.
    print(f"""{A_() = }""")
66
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __a = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A_ ( _lowercase ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :Tuple = False elif args.student_type == "gpt2": snake_case_ :Union[str, Any] = False 
def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :List[str] = False def A_ ( ): '''simple docstring''' snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", ) parser.add_argument( """--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", ) parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" ) parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", ) parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. 
Must be >=0.""" ) parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", ) parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", ) parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", ) parser.add_argument( """--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", ) parser.add_argument( """--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.""", ) parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", ) parser.add_argument( """--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", ) parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" ) parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", ) parser.add_argument( """--fp16_opt_level""", type=_lowercase, default="""O1""", help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ), ) parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" ) 
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" ) snake_case_ :Tuple = parser.parse_args() sanity_checks(_lowercase ) # ARGS # init_gpu_params(_lowercase ) set_seed(_lowercase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f: json.dump(vars(_lowercase ), _lowercase, indent=4 ) git_log(args.dump_path ) snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type] snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case_ :Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase ) snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) snake_case_ :str = special_tok_ids snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file, """rb""" ) as fp: snake_case_ :str = pickle.load(_lowercase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts, """rb""" ) as fp: snake_case_ :Optional[Any] = 
pickle.load(_lowercase ) snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case_ :Optional[int] = 0.0 # do not predict special tokens snake_case_ :int = torch.from_numpy(_lowercase ) else: snake_case_ :List[str] = None snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config ) snake_case_ :Union[str, Any] = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase ) else: snake_case_ :Optional[int] = student_model_class(_lowercase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowercase, _lowercase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowercase, _lowercase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case_ :Optional[int] = Distiller( params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if 
__name__ == "__main__": main()
66
1
"""Stable counting sort for integer sequences, plus a string helper."""


def counting_sort(collection):
    """Return a new list with the integers of ``collection`` sorted ascending,
    using a stable counting sort.

    Bug fixes vs. the original: the body mixed the parameter name with an
    undefined ``collection`` name (NameError on any call), and the output
    list was never actually filled (the placement assignment had been
    mangled into a dead local binding).
    """
    if not collection:
        return []
    coll_len = len(collection)
    coll_min = min(collection)
    coll_max = max(collection)
    # Histogram of occurrences, offset by the minimum value so negative
    # inputs are supported.
    counting_arr = [0] * (coll_max + 1 - coll_min)
    for number in collection:
        counting_arr[number - coll_min] += 1
    # Prefix sums: counting_arr[i] == count of elements <= i + coll_min.
    for i in range(1, len(counting_arr)):
        counting_arr[i] += counting_arr[i - 1]
    # Place elements scanning from the end so equal keys keep their
    # original relative order (stability).
    ordered = [0] * coll_len
    for i in reversed(range(coll_len)):
        value = collection[i]
        counting_arr[value - coll_min] -= 1
        ordered[counting_arr[value - coll_min]] = value
    return ordered


def counting_sort_string(string):
    """Return ``string`` with its characters sorted by Unicode code point.

    Bug fix: the original referenced undefined names (``counting_sort``,
    ``string``) and applied ``chr``/``ord`` to the whole argument instead of
    the loop variables.
    """
    return "".join(chr(code) for code in counting_sort([ord(c) for c in string]))


def A_(_lowercase):
    """Backward-compatible alias: in the original file both helpers were
    bound to ``A_`` and the second (string) definition shadowed the first."""
    return counting_sort_string(_lowercase)


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    # Bug fix: the original read into ``__a`` but split an undefined
    # ``user_input`` name.
    __a = input("Enter numbers separated by a comma:\n").strip()
    __a = [int(item) for item in __a.split(",")]
    print(counting_sort(__a))
66
"""SageMaker integration test for GLUE training with smdistributed model
parallelism (obfuscated copy; runs only when TEST_SAGEMAKER=True).

NOTE(review): obfuscation residue — the value-position ``snake_case`` names
(e.g. ``check=snake_case``) and references such as ``self.create_estimator``,
``estimator``, ``result_metrics_df`` and ``train_runtime`` are undefined in
this file; the comments below describe the intended behavior.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


# Parameterized over two configurations: the model-parallel GLUE script and
# the plain run_glue.py, both on roberta-large / ml.p3dn.24xlarge, with
# expected KPI bounds in ``results``.
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ] )
class lowerCamelCase ( unittest.TestCase ):
    """SageMaker GLUE model-parallelism end-to-end training test."""

    def lowerCAmelCase_ ( self: Any ) -> str:
        # Setup: copy the GLUE example script into the SageMaker test
        # workspace (pytorch framework only) and ensure the env fixture ran.
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , )
        assert hasattr(self , """env""" )

    def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]:
        # configuration for running training on smdistributed Model Parallel
        snake_case_ :Tuple = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        # Pipeline-parallel settings: 4 microbatches / 4 partitions with DDP.
        snake_case_ :List[Any] = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        # Job-name suffix distinguishes the plain trainer from the smp run.
        snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , )

    def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]:
        # Export the CloudWatch metrics of a finished training job to CSV.
        TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(1,)] )
    def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]:
        # create estimator
        snake_case_ :List[Any] = self.create_estimator(snake_case )
        # run training
        estimator.fit()
        # result dataframe
        snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        snake_case_ :int = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = "▁" __a = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } __a = { "vocab_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json" ), }, "spm_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model" ) }, } __a = { "facebook/s2t-small-librispeech-asr": 10_24, } __a = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] __a = {"mustc": MUSTC_LANGS} class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : int = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _A : int = MAX_MODEL_INPUT_SIZES _A : Dict = ["""input_ids""", """attention_mask"""] _A : List[int] = [] def __init__( self: Dict , snake_case: List[str] , snake_case: Tuple , snake_case: List[Any]="<s>" , snake_case: List[Any]="</s>" , snake_case: Optional[int]="<pad>" , snake_case: Any="<unk>" , snake_case: Tuple=False , snake_case: List[Any]=False , snake_case: int=None , snake_case: Optional[Any]=None , snake_case: Optional[Dict[str, Any]] = None , **snake_case: Tuple , ) -> None: snake_case_ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , do_upper_case=snake_case , do_lower_case=snake_case , tgt_lang=snake_case , lang_codes=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) snake_case_ :Union[str, Any] = do_upper_case snake_case_ :int = do_lower_case snake_case_ :List[str] = load_json(snake_case ) snake_case_ :Union[str, Any] = {v: k for k, v in 
self.encoder.items()} snake_case_ :Optional[int] = spm_file snake_case_ :List[str] = load_spm(snake_case , self.sp_model_kwargs ) if lang_codes is not None: snake_case_ :Tuple = lang_codes snake_case_ :List[Any] = LANGUAGES[lang_codes] snake_case_ :Union[str, Any] = [f"""<lang:{lang}>""" for lang in self.langs] snake_case_ :str = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} snake_case_ :Optional[int] = self.lang_tokens snake_case_ :Dict = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: snake_case_ :int = {} @property def lowerCAmelCase_ ( self: List[str] ) -> int: return len(self.encoder ) @property def lowerCAmelCase_ ( self: Dict ) -> str: return self._tgt_lang @tgt_lang.setter def lowerCAmelCase_ ( self: str , snake_case: str ) -> None: snake_case_ :Any = new_tgt_lang self.set_tgt_lang_special_tokens(snake_case ) def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = self.lang_code_to_id[tgt_lang] snake_case_ :List[Any] = [lang_code_id] def lowerCAmelCase_ ( self: int , snake_case: str ) -> List[str]: return self.sp_model.encode(snake_case , out_type=snake_case ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Optional[int] ) -> List[str]: return self.encoder.get(snake_case , self.encoder[self.unk_token] ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int ) -> str: return self.decoder.get(snake_case , self.unk_token ) def lowerCAmelCase_ ( self: Dict , snake_case: List[str] ) -> str: snake_case_ :Optional[int] = [] snake_case_ :Union[str, Any] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: snake_case_ :Any = self.sp_model.decode(snake_case ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " snake_case_ :List[str] = [] else: current_sub_tokens.append(snake_case ) snake_case_ :Any = 
self.sp_model.decode(snake_case ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: str , snake_case: Any=None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowerCAmelCase_ ( self: Tuple , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ :Union[str, Any] = [1] * len(self.prefix_tokens ) snake_case_ :Any = [1] if token_ids_a is None: return prefix_ones + ([0] * len(snake_case )) + suffix_ones return prefix_ones + ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones def lowerCAmelCase_ ( self: Any ) -> Dict: snake_case_ :List[str] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: Dict ) -> Dict: snake_case_ :Union[str, Any] = self.__dict__.copy() snake_case_ :List[Any] = None return state def __setstate__( self: Union[str, Any] , snake_case: Dict ) -> None: snake_case_ :List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ :int = {} snake_case_ :Optional[Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]: snake_case_ :Optional[Any] = Path(snake_case ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" snake_case_ :int = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) snake_case_ :Union[str, Any] = save_dir / ( 
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , snake_case ) if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , snake_case ) elif not os.path.isfile(self.spm_file ): with open(snake_case , """wb""" ) as fi: snake_case_ :Optional[int] = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (str(snake_case ), str(snake_case )) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Any = sentencepiece.SentencePieceProcessor(**_lowercase ) spm.Load(str(_lowercase ) ) return spm def A_ ( _lowercase ): '''simple docstring''' with open(_lowercase, """r""" ) as f: return json.load(_lowercase ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' with open(_lowercase, """w""" ) as f: json.dump(_lowercase, _lowercase, indent=2 )
66
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict: snake_case_ :Dict = parent snake_case_ :List[Any] = batch_size snake_case_ :Dict = image_size snake_case_ :Dict = patch_size snake_case_ :Tuple = num_channels snake_case_ :List[Any] = embed_dim snake_case_ :List[str] = depths snake_case_ :str = num_heads snake_case_ :Tuple = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :int = qkv_bias snake_case_ :Tuple = hidden_dropout_prob snake_case_ :Optional[Any] = 
attention_probs_dropout_prob snake_case_ :Dict = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Any = use_absolute_embeddings snake_case_ :int = patch_norm snake_case_ :List[Any] = layer_norm_eps snake_case_ :Tuple = initializer_range snake_case_ :str = is_training snake_case_ :int = scope snake_case_ :Tuple = use_labels snake_case_ :Tuple = type_sequence_label_size snake_case_ :str = encoder_stride snake_case_ :List[Any] = out_features snake_case_ :str = out_indices def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: int ) -> Optional[Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any: snake_case_ :Dict = MaskFormerSwinModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ 
:Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case ): snake_case_ :Optional[Any] = ["""stem"""] snake_case_ :str = MaskFormerSwinBackbone(config=snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :str = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} _A : List[str] = False _A : Any = False _A : Dict = False _A : List[Any] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case_ :str = MaskFormerSwinModelTester(self ) snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ 
`nn.DataParallel`""" ) ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Any ) -> Tuple: return def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: str ) -> List[str]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = 
[*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str: snake_case_ :List[str] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :Any = outputs.hidden_states snake_case_ :Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swin has a different seq_length snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[Any] = True 
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = 3 snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Any = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: List[str] ) -> str: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: str ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case: str ): snake_case_ :Optional[int] = 0 return t def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ): with torch.no_grad(): snake_case_ 
:List[Any] = model(**snake_case , return_dict=snake_case , **snake_case ) snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple() def recursive_check(snake_case: List[Any] , snake_case: int ): if isinstance(snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ): recursive_check(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case , snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has""" f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}.""" ) , ) recursive_check(snake_case , snake_case ) for model_class in self.all_model_classes: snake_case_ :int = model_class(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , 
return_labels=snake_case ) snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ): '''simple docstring''' _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else () _A : Tuple = MaskFormerSwinConfig def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ :List[str] = backbone_class(snake_case ) backbone.to(snake_case ) backbone.eval() snake_case_ :List[Any] = backbone(**snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if 
self.has_attentions: snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case ) self.assertIsNotNone(outputs.attentions )
66
1
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = list(_lowercase ) snake_case_ :str = list(_lowercase ) snake_case_ :Tuple = 0 for i in range(len(_lowercase ) ): if lista[i] != lista[i]: count += 1 snake_case_ :str = """_""" if count > 1: return False else: return "".join(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[Any] = [] while True: snake_case_ :Union[str, Any] = ["""$"""] * len(_lowercase ) snake_case_ :Union[str, Any] = [] for i in range(len(_lowercase ) ): for j in range(i + 1, len(_lowercase ) ): snake_case_ :List[Any] = compare_string(binary[i], binary[j] ) if k is False: snake_case_ :Union[str, Any] = """*""" snake_case_ :Any = """*""" temp.append("""X""" ) for i in range(len(_lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_lowercase ) == 0: return pi snake_case_ :str = list(set(_lowercase ) ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Any = [] for minterm in minterms: snake_case_ :List[Any] = """""" for _ in range(_lowercase ): snake_case_ :List[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(_lowercase ) return temp def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = list(_lowercase ) snake_case_ :List[Any] = list(_lowercase ) snake_case_ :Tuple = 0 for i in range(len(_lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = [] snake_case_ :Dict = [0] * len(_lowercase ) for i in range(len(chart[0] ) ): snake_case_ :Optional[int] = 0 snake_case_ :str = -1 for j in range(len(_lowercase ) ): if chart[j][i] == 1: count += 1 snake_case_ :List[str] = j if count == 1: snake_case_ :Tuple = 1 for i in range(len(_lowercase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if 
chart[i][j] == 1: for k in range(len(_lowercase ) ): snake_case_ :List[str] = 0 temp.append(prime_implicants[i] ) while True: snake_case_ :Optional[int] = 0 snake_case_ :str = -1 snake_case_ :int = 0 for i in range(len(_lowercase ) ): snake_case_ :Tuple = chart[i].count(1 ) if count_n > max_n: snake_case_ :List[str] = count_n snake_case_ :Dict = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_lowercase ) ): snake_case_ :Union[str, Any] = 0 def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Any = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )] for i in range(len(_lowercase ) ): snake_case_ :List[Any] = prime_implicants[i].count("""_""" ) for j in range(len(_lowercase ) ): if is_for_table(prime_implicants[i], binary[j], _lowercase ): snake_case_ :Union[str, Any] = 1 return chart def A_ ( ): '''simple docstring''' snake_case_ :Optional[int] = int(input("""Enter the no. of variables\n""" ) ) snake_case_ :int = [ float(_lowercase ) for x in input( """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split() ] snake_case_ :Union[str, Any] = decimal_to_binary(_lowercase, _lowercase ) snake_case_ :str = check(_lowercase ) print("""Prime Implicants are:""" ) print(_lowercase ) snake_case_ :Any = prime_implicant_chart(_lowercase, _lowercase ) snake_case_ :Optional[Any] = selection(_lowercase, _lowercase ) print("""Essential Prime Implicants are:""" ) print(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
66
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __a = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> Tuple: snake_case_ :List[str] = 4 snake_case_ :Tuple = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: List[str] ) -> Dict: return (3, 32, 32) @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (3, 32, 32) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } snake_case_ :Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[str] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> str: snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 4 snake_case_ :int = (32, 32) snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": 
time_step} @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (4, 32, 32) @property def lowerCAmelCase_ ( self: List[Any] ) -> int: return (4, 32, 32) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: snake_case_ :Dict = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } snake_case_ :List[str] = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :List[str] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model.to(snake_case ) snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: str ) -> Any: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model_accelerate.to(snake_case ) model_accelerate.eval() snake_case_ :List[Any] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , 
model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case ) snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() snake_case_, snake_case_ :str = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case ) model_normal_load.to(snake_case ) model_normal_load.eval() snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""] assert torch_all_close(snake_case , snake_case , rtol=1E-3 ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(snake_case ) snake_case_ :Optional[int] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case ) with torch.no_grad(): snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) ) class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : List[Any] = """sample""" @property def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple: snake_case_ :Union[str, Any] = 4 snake_case_ :Any = 3 snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes 
).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: return (3, 32, 32) @property def lowerCAmelCase_ ( self: int ) -> Tuple: return (3, 32, 32) def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case_ :List[Any] = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } snake_case_ :int = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :Any = self.dummy_input snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case ) snake_case_ :int = noise snake_case_ :int = model(**snake_case ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self: str ) -> Dict: snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(snake_case ) snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 3 snake_case_ :List[str] = (256, 256) snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :Dict = model(snake_case , snake_case ).sample snake_case_ 
:int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(snake_case ) snake_case_ :Optional[int] = 4 snake_case_ :Optional[Any] = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :str = model(snake_case , snake_case ).sample snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: # not required for this model pass
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a = { "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
1
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: # A mock response for an HTTP head request to emulate server down snake_case_ :Any = mock.Mock() snake_case_ :int = 500 snake_case_ :Optional[Any] = {} snake_case_ :Optional[Any] = HTTPError snake_case_ :Union[str, Any] = {} # Download this model to make sure it's in the cache. snake_case_ :List[str] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=snake_case ) as mock_head: snake_case_ :str = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down snake_case_ :Any = mock.Mock() snake_case_ :Dict = 500 snake_case_ :Any = {} snake_case_ :List[Any] = HTTPError snake_case_ :Optional[Any] = {} # Download this model to make sure it's in the cache. 
snake_case_ :Any = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=snake_case ) as mock_head: snake_case_ :Union[str, Any] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self: Optional[int] ) -> Any: # This test is for deprecated behavior and can be removed in v5 try: snake_case_ :List[str] = tempfile.mktemp() with open(snake_case , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , snake_case ) snake_case_ :List[str] = AlbertTokenizer.from_pretrained(snake_case ) finally: os.remove(snake_case ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , snake_case ) snake_case_ :List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def lowerCAmelCase_ ( self: int ) -> Tuple: # This test is for deprecated behavior and can be removed in v5 snake_case_ :str = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' _A : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def lowerCAmelCase_ ( cls: str ) -> Tuple: snake_case_ :int = TOKEN HfFolder.save_token(snake_case ) @classmethod def lowerCAmelCase_ ( cls: Any ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def lowerCAmelCase_ ( self: Tuple ) -> List[str]: with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ :Tuple = os.path.join(snake_case , """vocab.txt""" ) with open(snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) snake_case_ :Any = BertTokenizer(snake_case ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) snake_case_ :Optional[int] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case , repo_id="""test-tokenizer""" , push_to_hub=snake_case , use_auth_token=self._token ) snake_case_ :Optional[int] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: 
with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ :Union[str, Any] = os.path.join(snake_case , """vocab.txt""" ) with open(snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) snake_case_ :Dict = BertTokenizer(snake_case ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) snake_case_ :List[str] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( snake_case , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=snake_case , use_auth_token=self._token ) snake_case_ :Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ :int = os.path.join(snake_case , """vocab.txt""" ) with open(snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) snake_case_ :Union[str, Any] = CustomTokenizer(snake_case ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) snake_case_ :Optional[int] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() 
as tmp_dir: snake_case_ :int = os.path.join(snake_case , """vocab.txt""" ) with open(snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) snake_case_ :Optional[int] = BertTokenizerFast.from_pretrained(snake_case ) bert_tokenizer.save_pretrained(snake_case ) snake_case_ :List[Any] = CustomTokenizerFast.from_pretrained(snake_case ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) snake_case_ :List[Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) snake_case_ :List[Any] = AutoTokenizer.from_pretrained( f"""{USER}/test-dynamic-tokenizer""" , use_fast=snake_case , trust_remote_code=snake_case ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case_ :Tuple = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :List[Any] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, 
"""extra_id_100"""] ) def lowerCAmelCase_ ( self: Optional[int] ) -> str: snake_case_ :Union[str, Any] = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Any = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def lowerCAmelCase_ ( self: Any ) -> int: snake_case_ :Optional[int] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :int = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Dict = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def lowerCAmelCase_ ( self: Tuple ) -> Any: # Even if the offsets are wrong, we necessarily output correct string # parts. snake_case_ :List[Any] = Trie() snake_case_ :Tuple = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(snake_case , ["""AB""", """C"""] )
66
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = StableDiffusionSAGPipeline _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : List[str] = False def lowerCAmelCase_ ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) snake_case_ :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ :Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) snake_case_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_000 , ) snake_case_ :Tuple = CLIPTextModel(snake_case ) snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ :Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str: if str(snake_case ).startswith("""mps""" ): snake_case_ :Tuple = torch.manual_seed(snake_case ) else: snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ :Any = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self: Optional[int] ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Union[str, Any] = """.""" snake_case_ :str = torch.manual_seed(0 ) snake_case_ :str = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :List[Any] = output.images snake_case_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :Optional[int] = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Union[str, Any] = torch.manual_seed(0 ) snake_case_ :Tuple = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :Optional[int] = output.images snake_case_ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Optional[int] = torch.manual_seed(0 ) snake_case_ :List[str] = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) snake_case_ :Optional[Any] = output.images assert image.shape == (1, 512, 768, 3)
66
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run __a = True except (ImportError, AttributeError): __a = object def A_ ( *_lowercase, **_lowercase ): '''simple docstring''' pass __a = False __a = logging.get_logger("transformers-cli/serving") def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[Any] = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) return ServeCommand(_lowercase, args.host, args.port, args.workers ) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : dict class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : List[str] _A : Optional[List[int]] class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : str class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Any class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' @staticmethod def lowerCAmelCase_ ( snake_case: ArgumentParser ) -> Tuple: snake_case_ :Any = parser.add_parser( """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" ) serve_parser.add_argument( """--task""" , type=snake_case , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , ) serve_parser.add_argument("""--host""" , type=snake_case , default="""localhost""" , help="""Interface the server will listen on.""" ) serve_parser.add_argument("""--port""" , type=snake_case , default=8_888 , help="""Port the serving will listen to.""" ) serve_parser.add_argument("""--workers""" , type=snake_case , default=1 , help="""Number of 
http workers""" ) serve_parser.add_argument("""--model""" , type=snake_case , help="""Model's name or path to stored model.""" ) serve_parser.add_argument("""--config""" , type=snake_case , help="""Model's config name or path to stored model.""" ) serve_parser.add_argument("""--tokenizer""" , type=snake_case , help="""Tokenizer name to use.""" ) serve_parser.add_argument( """--device""" , type=snake_case , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) serve_parser.set_defaults(func=snake_case ) def __init__( self: int , snake_case: Pipeline , snake_case: str , snake_case: int , snake_case: int ) -> List[Any]: snake_case_ :Optional[Any] = pipeline snake_case_ :Optional[Any] = host snake_case_ :Optional[Any] = port snake_case_ :Tuple = workers if not _serve_dependencies_installed: raise RuntimeError( """Using serve command requires FastAPI and uvicorn. """ """Please install transformers with [serving]: pip install \"transformers[serving]\".""" """Or install FastAPI and uvicorn separately.""" ) else: logger.info(f"""Serving model over {host}:{port}""" ) snake_case_ :List[str] = FastAPI( routes=[ APIRoute( """/""" , self.model_info , response_model=snake_case , response_class=snake_case , methods=["""GET"""] , ), APIRoute( """/tokenize""" , self.tokenize , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ), APIRoute( """/detokenize""" , self.detokenize , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ), APIRoute( """/forward""" , self.forward , response_model=snake_case , response_class=snake_case , methods=["""POST"""] , ), ] , timeout=600 , ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: run(self._app , host=self.host , port=self.port , workers=self.workers ) def lowerCAmelCase_ ( self: Any ) -> Any: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def lowerCAmelCase_ ( self: Tuple , snake_case: str = 
Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) ) -> Union[str, Any]: try: snake_case_ :Dict = self._pipeline.tokenizer.tokenize(snake_case ) if return_ids: snake_case_ :int = self._pipeline.tokenizer.convert_tokens_to_ids(snake_case ) return ServeTokenizeResult(tokens=snake_case , tokens_ids=snake_case ) else: return ServeTokenizeResult(tokens=snake_case ) except Exception as e: raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(snake_case )} ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: List[int] = Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) , snake_case: bool = Body(snake_case , embed=snake_case ) , ) -> Union[str, Any]: try: snake_case_ :Dict = self._pipeline.tokenizer.decode(snake_case , snake_case , snake_case ) return ServeDeTokenizeResult(model="""""" , text=snake_case ) except Exception as e: raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(snake_case )} ) async def lowerCAmelCase_ ( self: Dict , snake_case: Optional[int]=Body(snake_case , embed=snake_case ) ) -> Union[str, Any]: # Check we don't have empty string if len(snake_case ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model snake_case_ :List[str] = self._pipeline(snake_case ) return ServeForwardResult(output=snake_case ) except Exception as e: raise HTTPException(500 , {"""error""": str(snake_case )} )
66
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCamelCase : '''simple docstring''' def __init__( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {} def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = {} def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None: if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ :Dict = probability def lowerCAmelCase_ ( self: List[Any] ) -> list[str]: return list(self.connections ) def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str: snake_case_ :Optional[Any] = 0 snake_case_ :List[str] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase, _lowercase, _lowercase ) snake_case_ :int = Counter(graph.get_nodes() ) snake_case_ :Optional[Any] = start for _ in range(_lowercase ): snake_case_ :Tuple = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = torch.load(_lowercase, map_location="""cpu""" ) snake_case_ :Any = chkpt["""model"""] # We have the base model one level deeper than the original XLM repository snake_case_ :Dict = {} for k, v in state_dict.items(): if "pred_layer" in k: snake_case_ :Optional[Any] = v else: snake_case_ :List[str] = v snake_case_ :List[Any] = chkpt["""params"""] snake_case_ :str = {n: v for n, v in config.items() if not isinstance(_lowercase, (torch.FloatTensor, numpy.ndarray) )} snake_case_ :List[Any] = chkpt["""dico_word2id"""] snake_case_ :Optional[Any] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""", """""" ): i for s, i in vocab.items()} # Save pytorch-model snake_case_ :Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME snake_case_ :List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME snake_case_ :Optional[int] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(_lowercase, _lowercase ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(_lowercase, """w""", encoding="""utf-8""" ) as f: f.write(json.dumps(_lowercase, indent=2 ) + """\n""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." 
) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __a = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
66
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __a = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", 
"MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", 
_lowercase ) return [m.group(0 ) for m in matches] def A_ ( ): '''simple docstring''' snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ :Dict = { config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case_ :Optional[Any] = collections.defaultdict(_lowercase ) snake_case_ :int = collections.defaultdict(_lowercase ) snake_case_ :List[str] = collections.defaultdict(_lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(_lowercase ): snake_case_ :int = None if _re_tf_models.match(_lowercase ) is not None: snake_case_ :int = tf_models snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0] elif _re_flax_models.match(_lowercase ) is not None: snake_case_ :List[Any] = flax_models snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0] elif _re_pt_models.match(_lowercase ) is not None: snake_case_ :Optional[Any] = pt_models snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0] if lookup_dict is not None: while len(_lowercase ) > 0: if attr_name in model_prefix_to_model_type: snake_case_ :Optional[int] = True break # Try again after removing the last word in the name snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] ) snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case_ :Optional[Any] = list(_lowercase ) all_models.sort() snake_case_ :Optional[int] = {"""model_type""": all_models} snake_case_ :Optional[int] = [pt_models[t] for t in all_models] snake_case_ :Any = [tf_models[t] for t in all_models] snake_case_ :Dict = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case_ :Dict = {} for t in all_models: if t in 
transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case_ :Optional[Any] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case_ :Tuple = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case_ :Tuple = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. snake_case_ :str = """AutoTokenizer""" snake_case_ :int = [processors[t] for t in all_models] return pd.DataFrame(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ): # The type of pipeline may not exist in this framework if not hasattr(_lowercase, _lowercase ): continue # First extract all model_names snake_case_ :Tuple = [] for name in getattr(_lowercase, _lowercase ).values(): if isinstance(_lowercase, _lowercase ): model_names.append(_lowercase ) else: model_names.extend(list(_lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = get_frameworks_table() snake_case_ :str = Dataset.from_pandas(_lowercase ) snake_case_ :List[Any] = hf_hub_download( """huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase ) 
snake_case_ :List[str] = Dataset.from_json(_lowercase ) snake_case_ :int = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(_lowercase ) ) } snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case_ :Tuple = sorted(table.keys() ) snake_case_ :Tuple = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case_ :Union[str, Any] = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: snake_case_ :List[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, ) def A_ ( ): '''simple docstring''' snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS snake_case_ :List[str] = [] for key in pipeline_tasks: if key not in in_table: snake_case_ :int = pipeline_tasks[key]["""pt"""] if isinstance(_lowercase, (list, tuple) ): snake_case_ :Any = model[0] snake_case_ :str = model.__name__ if model not in in_table.values(): missing.append(_lowercase ) if len(_lowercase ) > 0: snake_case_ :Optional[int] = """, """.join(_lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f"""`utils/update_metadata.py`: 
{msg}. Please add them!""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __a = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
66
1
"""Project Euler problem 15: count lattice paths through an n x n grid.

The number of monotonic paths from the top-left to the bottom-right corner
of an n x n grid is the central binomial coefficient C(2n, n).
"""
from math import factorial


def A_(_lowercase=20):
    """Return the number of lattice paths through an ``_lowercase`` x ``_lowercase`` grid.

    The answer is the middle entry of row 2n of Pascal's triangle:
    C(2n, n) = (2n)! / (n! * n!).
    """
    # A path consists of 2n steps; choosing which n of them go "right"
    # (the rest go "down") uniquely determines the path.
    total_steps = 2 * _lowercase
    half = total_steps // 2
    # Fixed: the original referenced undefined names ``n`` and ``k`` here.
    return int(factorial(total_steps) / (factorial(half) * factorial(total_steps - half)))


# Backwards-compatible alias; the CLI below (and the original upstream file)
# refers to the function as ``solution``.
solution = A_

if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
66
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __a = logging.getLogger(__name__) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Union[str, Any] = """token-classification""" def __init__( self: Any , snake_case: Tuple ) -> List[Any]: if type(snake_case ) == dict: snake_case_ :Optional[int] = Namespace(**snake_case ) snake_case_ :Optional[int] = import_module("""tasks""" ) try: snake_case_ :Any = getattr(snake_case , hparams.task_type ) snake_case_ :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels ) snake_case_ :str = CrossEntropyLoss().ignore_index super().__init__(snake_case , len(self.labels ) , self.mode ) def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any: return self.model(**snake_case ) def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]: snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :List[str] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Optional[Any] = self(**snake_case ) snake_case_ :List[str] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_ :List[Any] = self.hparams for mode in ["train", "dev", "test"]: snake_case_ :Optional[int] = self._feature_file(snake_case ) if os.path.exists(snake_case ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :Optional[int] = torch.load(snake_case ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case ) snake_case_ :Any = self.token_classification_task.convert_examples_to_features( snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , 
pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , snake_case ) torch.save(snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader: snake_case_ :int = self._feature_file(snake_case ) logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :str = torch.load(snake_case ) snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]: """Compute validation""" "" snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :Dict = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Dict = self(**snake_case ) snake_case_, snake_case_ :Dict = outputs[:2] snake_case_ :Union[str, Any] = logits.detach().cpu().numpy() snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple: snake_case_ :Union[str, 
Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean() snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) snake_case_ :Tuple = np.argmax(snake_case , axis=2 ) snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) snake_case_ :Optional[Any] = dict(enumerate(self.labels ) ) snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case_ :str = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(snake_case , snake_case ), """precision""": precision_score(snake_case , snake_case ), """recall""": recall_score(snake_case , snake_case ), """f1""": fa_score(snake_case , snake_case ), } snake_case_ :List[Any] = dict(results.items() ) snake_case_ :Union[str, Any] = results return ret, preds_list, out_label_list def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]: # when stable snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case ) snake_case_ :str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any: # updating to test_epoch_end instead of deprecated test_end snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case_ :Optional[int] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase_ ( 
snake_case: Any , snake_case: int ) -> Dict: # Add NER specific options BaseTransformer.add_model_specific_args(snake_case , snake_case ) parser.add_argument( """--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=snake_case , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __a = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __a = NERTransformer.add_model_specific_args(parser, os.getcwd()) __a = parser.parse_args() __a = NERTransformer(args) __a = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __a = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
66
1
"""simple docstring""" from scipy.stats import pearsonr import datasets __a = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n" __a = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. 
Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n" __a = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , ) def lowerCAmelCase_ ( self: List[str] , snake_case: Optional[int] , snake_case: List[Any] , snake_case: Optional[int]=False ) -> int: if return_pvalue: snake_case_ :Union[str, Any] = pearsonr(snake_case , snake_case ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(snake_case , snake_case )[0] )}
66
"""Automatic differentiation with dual numbers.

A ``Dual`` value carries a real part plus a list of infinitesimal
coefficients (E^1, E^2, ...); arithmetic on Duals propagates derivatives,
so the k-th derivative of ``func`` at ``position`` can be read off the
k-th dual coefficient times k!.
"""
from math import factorial


class Dual:
    """A truncated dual number: ``real + duals[0]*E1 + duals[1]*E2 + ...``."""

    def __init__(self, real, rank):
        # Fixed: the original used duplicate parameter names (SyntaxError) and
        # the class was renamed so its own ``Dual(...)`` calls raised NameError.
        self.real = real
        if isinstance(rank, int):
            # An integer rank means "seed with that many unit coefficients".
            self.duals = [1] * rank
        else:
            # Otherwise the caller supplies the coefficient list directly.
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero dual coefficients stripped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            # Adding a scalar only shifts the real part.
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # NOTE(review): padding the shorter list with 1s (not 0s) mirrors the
        # original implementation; verify against upstream intent.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            # Scalar multiplication scales every coefficient.
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Full product: convolve coefficient lists (E^i * E^j contributes to E^(i+j+2),
        # i.e. index i+j+1) plus the real/dual cross terms.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        # Division by another dual number is not supported.
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the ``order``-th derivative of ``func`` evaluated at ``position``.

    Raises ValueError when ``func`` is not callable, ``position`` is not a
    number, or ``order`` is not an int.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # Seed a dual number with a single unit coefficient and push it through func.
    seed = Dual(position, 1)
    result = func(seed)
    if order == 0:
        return result.real
    # duals[k-1] holds f^(k)(position) / k!, so rescale by k!.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """Example function: y**2 * y**4 == y**6."""
        return y**2 * y**4

    print(differentiate(f, 9, 2))
66
1
"""Audio Spectrogram Transformer (AST) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

# NOTE(review): this rebinds ``__a`` so the logger above is lost; kept from the
# original (mangled) file, where the two were separate names.
__a = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class lowerCamelCase(PretrainedConfig):
    """Configuration for an Audio Spectrogram Transformer model.

    Stores the hyper-parameters read by the AST model implementation.
    Fixed: the original ``__init__`` declared every parameter with the same
    name (a SyntaxError) and inherited from an undefined name instead of the
    imported ``PretrainedConfig``.
    """

    # Model identifier string (mangled attribute name ``_A`` kept for compatibility).
    _A: str = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1_024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patching parameters.
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # Input spectrogram geometry (time frames x mel bins).
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
66
"""In-place LSD (least-significant-digit) radix sort for non-negative integers."""
from __future__ import annotations

# Number base used to bucket digits (decimal digits).
__a = 10
RADIX = __a


def A_(_lowercase):
    """Sort ``_lowercase`` (a list of non-negative ints) in place and return it.

    Fixed: the original referenced undefined names (``placement``,
    ``max_digit``, ``RADIX``) left over from variable mangling, and crashed
    with ``ValueError`` on an empty list via ``max([])``.
    """
    if not _lowercase:
        # Nothing to sort; also avoids max() on an empty sequence.
        return _lowercase
    placement = 1  # current digit position weight: 1, 10, 100, ...
    max_digit = max(_lowercase)
    while placement <= max_digit:
        # Declare and initialize empty buckets, one per possible digit value.
        buckets: list[list] = [[] for _ in range(RADIX)]
        # Distribute values into buckets keyed by the current digit.
        for value in _lowercase:
            tmp = int((value / placement) % RADIX)
            buckets[tmp].append(value)
        # Collect the buckets back into the list; bucket order preserves
        # stability, which is what makes LSD radix sort correct.
        a = 0
        for b in range(RADIX):
            for value in buckets[b]:
                _lowercase[a] = value
                a += 1
        # Move to the next, more significant digit.
        placement *= RADIX
    return _lowercase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
66
1
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __a = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } __a = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def A_ ( _lowercase, _lowercase=False ): '''simple docstring''' snake_case_, snake_case_ :int = create_model( """HTSAT-tiny""", """roberta""", _lowercase, precision="""fp32""", device="""cuda:0""" if torch.cuda.is_available() else """cpu""", enable_fusion=_lowercase, fusion_type="""aff_2d""" if enable_fusion else None, ) return model, model_cfg def A_ ( _lowercase ): '''simple docstring''' snake_case_ :str = {} snake_case_ :Optional[int] = r""".*sequential.(\d+).*""" snake_case_ :Union[str, Any] = r""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: snake_case_ :Tuple = key.replace(_lowercase, _lowercase ) if re.match(_lowercase, _lowercase ): # replace sequential layers with list snake_case_ :Union[str, Any] = re.match(_lowercase, _lowercase ).group(1 ) snake_case_ :Optional[int] = key.replace(f"""sequential.{sequential_layer}.""", f"""layers.{int(_lowercase )//3}.linear.""" ) elif re.match(_lowercase, _lowercase ): snake_case_ :List[Any] = int(re.match(_lowercase, _lowercase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
snake_case_ :List[Any] = 1 if projecton_layer == 0 else 2 snake_case_ :Optional[Any] = key.replace(f"""_projection.{projecton_layer}.""", f"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value snake_case_ :Dict = value snake_case_ :str = mixed_qkv.size(0 ) // 3 snake_case_ :List[Any] = mixed_qkv[:qkv_dim] snake_case_ :List[Any] = mixed_qkv[qkv_dim : qkv_dim * 2] snake_case_ :Dict = mixed_qkv[qkv_dim * 2 :] snake_case_ :Tuple = query_layer snake_case_ :Optional[Any] = key_layer snake_case_ :Any = value_layer else: snake_case_ :Any = value return model_state_dict def A_ ( _lowercase, _lowercase, _lowercase, _lowercase=False ): '''simple docstring''' snake_case_, snake_case_ :Union[str, Any] = init_clap(_lowercase, enable_fusion=_lowercase ) clap_model.eval() snake_case_ :Any = clap_model.state_dict() snake_case_ :Dict = rename_state_dict(_lowercase ) snake_case_ :Optional[int] = ClapConfig() snake_case_ :Optional[int] = enable_fusion snake_case_ :Dict = ClapModel(_lowercase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowercase, strict=_lowercase ) model.save_pretrained(_lowercase ) transformers_config.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") __a = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
66
"""Lazy import structure for the Reformer model (configuration, tokenizers, PyTorch modules)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base import structure: the configuration is always importable.
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

# Slow (sentencepiece-based) tokenizer, registered only when sentencepiece is installed.
# NOTE(review): each branch below REBINDS ``__a`` instead of adding an entry to the
# dict above — the upstream file updates ``_import_structure[...]``; confirm upstream.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["ReformerTokenizer"]

# Fast (tokenizers-based) tokenizer, only when the tokenizers package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["ReformerTokenizerFast"]

# PyTorch model classes, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

# Under static type checking, perform the real imports so checkers see the symbols.
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

# At runtime, replace this module in sys.modules with a lazy proxy that imports
# submodules on first attribute access.
else:
    import sys

    # NOTE(review): ``_import_structure`` is not defined in this (mangled) module —
    # the structures above were renamed to ``__a``; confirm against the upstream file.
    __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Train an XGBoost classifier on the Iris dataset and plot its confusion matrix."""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(_lowercase):
    """Split a scikit-learn dataset mapping into a (features, targets) pair.

    Fixed: the original ignored its parameter and referenced an undefined
    name ``data``.
    """
    return (_lowercase["data"], _lowercase["target"])


def xgboost(features, target):
    """Fit and return an XGBClassifier on the given features/targets.

    Fixed: the original declared both parameters with the same name
    (a SyntaxError).
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main():
    """Load Iris, train the classifier, and display a normalized confusion matrix.

    Fixed: the original named all three functions ``A_`` (each shadowing the
    previous) and referenced an undefined name ``iris`` for the label names.
    """
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]
    # Create an XGBoost classifier from the training data.
    classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier on the held-out test set.
    ConfusionMatrixDisplay.from_estimator(
        classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


# Backwards-compatible alias: in the mangled original, ``A_`` was last bound
# to the main routine.
A_ = main

if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
66
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :Union[str, Any] = controlnet_params snake_case_ :Union[str, Any] = """bird""" snake_case_ :List[Any] = jax.device_count() snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case_ :Any = jax.random.PRNGKey(0 ) snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() ) snake_case_ :List[Any] = replicate(snake_case ) snake_case_ :List[str] = shard(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :Dict = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1] snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Dict = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :str = controlnet_params snake_case_ :Optional[int] = """Chef in the kitchen""" snake_case_ :Union[str, Any] = jax.device_count() snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case_ :str = jax.random.PRNGKey(0 ) snake_case_ :str = jax.random.split(snake_case , jax.device_count() ) snake_case_ :Tuple = replicate(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :int = shard(snake_case ) snake_case_ :List[str] = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ :int = images[0, 253:256, 253:256, -1] snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Optional[int] = jnp.array( [[0.2_7_1_4_8_4, 
0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" __a = [0, 2, 4, 6, 8] __a = [1, 3, 5, 7, 9] def A_ ( _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1, -1, -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 snake_case_ :Union[str, Any] = 0 for digit in range(10 ): snake_case_ :Any = digit result += reversible_numbers( 0, (remainder + 2 * digit) // 10, _lowercase, _lowercase ) return result snake_case_ :int = 0 for digita in range(10 ): snake_case_ :Optional[int] = digita if (remainder + digita) % 2 == 0: snake_case_ :int = ODD_DIGITS else: snake_case_ :Dict = EVEN_DIGITS for digita in other_parity_digits: snake_case_ :List[str] = digita result += reversible_numbers( remaining_length - 2, (remainder + digita + digita) // 10, _lowercase, _lowercase, ) return result def A_ ( _lowercase = 9 ): '''simple docstring''' snake_case_ :Any = 0 for length in range(1, max_power + 1 ): result += reversible_numbers(_lowercase, 0, [0] * length, _lowercase ) return result if __name__ == "__main__": print(F"""{solution() = }""")
66
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" ) snake_case_ :Any = json.loads(open(_lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(""".pt""" ): snake_case_ :Optional[int] = args.output + """.pt""" snake_case_ :List[str] = OrderedDict() with tf.device("""/CPU:0""" ): snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir ) snake_case_ :str = reader.get_variable_to_shape_map() for key_name in shapes.keys(): snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): snake_case_ :Any = int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): snake_case_ :Optional[int] = 8 snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :List[str] = torch.tensor(_lowercase ) elif key_name.startswith("""model/moe""" ): snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/softmlp/kernel""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player snake_case_ :Optional[int] = 
vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): snake_case_ :Dict = key_name[-9:-7] for i in range(16 ): snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) snake_case_ :Tuple = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/mlp""" ): snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p1/bias""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player snake_case_ :str = vnp.copy() # same because it is one dimensional snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/bias""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player snake_case_ :Any = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/ln""" ): snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :int = 
torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.startswith("""model/att""" ): snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum snake_case_ :Dict = state[:, 0, :, :] snake_case_ :int = state[:, 1, :, :] snake_case_ :List[str] = state[:, 2, :, :] snake_case_ :str = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[int] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player snake_case_ :int = torch.tensor(_lowercase ) snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player snake_case_ :Dict = torch.tensor(_lowercase ) snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/o/kernel""" ): snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player snake_case_ :str = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = torch.tensor(_lowercase ) elif key_name.startswith("""model/an""" ): snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): 
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) if key_name.startswith("""model/wte""" ): snake_case_ :Tuple = """lm_head.weight""" snake_case_ :List[str] = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) elif key_name.startswith("""model/wob""" ): snake_case_ :str = """final_logits_bias""" snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = state.reshape((1, -1) ) snake_case_ :Union[str, Any] = torch.tensor(_lowercase ) elif key_name == "model/dense/kernel": snake_case_ :str = """model.last_project.weight""" snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = torch.tensor(_lowercase ) elif key_name == "model/dense_1/bias": snake_case_ :Optional[int] = """model.last_project.bias""" snake_case_ :Tuple = vnp.copy() # same because it is one dimensional snake_case_ :Any = torch.tensor(_lowercase ) torch.save(_lowercase, args.output ) if __name__ == "__main__": __a = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, 
required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") __a = parser.parse_args() convert_tf_gptsan_to_pt(args)
66
1
"""simple docstring""" from __future__ import annotations import numpy as np def A_ ( _lowercase ): '''simple docstring''' return np.maximum(0, _lowercase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
66
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __a = pd.read_csv("sample_data.csv", header=None) __a = df.shape[:1][0] # If you're using some other dataset input the target column __a = df.iloc[:, 1:2] __a = actual_data.values.reshape(len_data, 1) __a = MinMaxScaler().fit_transform(actual_data) __a = 10 __a = 5 __a = 20 __a = len_data - periods * look_back __a = actual_data[:division] __a = actual_data[division - look_back :] __a , __a = [], [] __a , __a = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __a = np.array(train_x) __a = np.array(test_x) __a = np.array([list(i.ravel()) for i in train_y]) __a = np.array([list(i.ravel()) for i in test_y]) __a = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") __a = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) __a = model.predict(x_test)
66
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def A_ ( _lowercase ): '''simple docstring''' return ConvertCommand( args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name ) __a = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' @staticmethod def lowerCAmelCase_ ( snake_case: ArgumentParser ) -> List[str]: snake_case_ :Union[str, Any] = parser.add_parser( """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , ) train_parser.add_argument("""--model_type""" , type=snake_case , required=snake_case , help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" , type=snake_case , required=snake_case , help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" , type=snake_case , required=snake_case , help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" , type=snake_case , default="""""" , help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" , type=snake_case , default=snake_case , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , ) train_parser.set_defaults(func=snake_case ) def __init__( self: Optional[int] , snake_case: str , snake_case: str , snake_case: str , snake_case: str , snake_case: str , *snake_case: Dict , ) -> List[Any]: snake_case_ :Union[str, Any] = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(f"""Loading model {model_type}""" ) snake_case_ :Any = model_type snake_case_ :List[Any] = 
tf_checkpoint snake_case_ :Union[str, Any] = pytorch_dump_output snake_case_ :Optional[Any] = config snake_case_ :Optional[Any] = finetuning_task_name def lowerCAmelCase_ ( self: str ) -> Tuple: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(snake_case ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) if "ckpt" in self._tf_checkpoint.lower(): snake_case_ :Tuple = self._tf_checkpoint snake_case_ :List[Any] = """""" else: snake_case_ 
:Union[str, Any] = self._tf_checkpoint snake_case_ :Union[str, Any] = """""" convert_transfo_xl_checkpoint_to_pytorch( snake_case , self._config , self._pytorch_dump_output , snake_case ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(snake_case ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a = { "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" from jiwer import compute_measures import datasets __a = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" __a = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. 
The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" __a = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def lowerCAmelCase_ ( self: int , snake_case: Optional[Any]=None , snake_case: Dict=None , snake_case: Any=False ) -> Optional[int]: if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: snake_case_ :List[str] = 0 snake_case_ :Dict = 0 for prediction, reference in zip(snake_case , snake_case ): snake_case_ :List[str] = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
66
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = XCLIPTextConfig() # derive patch size from model name snake_case_ :Union[str, Any] = model_name.find("""patch""" ) snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase ) if "large" in model_name: snake_case_ :Optional[Any] = 768 snake_case_ :Union[str, Any] = 3072 snake_case_ :Any = 12 snake_case_ :Any = 1024 snake_case_ :str = 4096 snake_case_ :Union[str, Any] = 16 snake_case_ :Union[str, Any] = 24 snake_case_ :Tuple = 768 snake_case_ :Any = 3072 if model_name == "xclip-large-patch14-16-frames": snake_case_ :Any = 336 snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase ) if "large" in model_name: snake_case_ :List[Any] = 768 return config def A_ ( _lowercase ): '''simple docstring''' if name == "token_embedding.weight": snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: snake_case_ :str = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: snake_case_ :int = name.replace("""c_proj""", """fc2""" ) if name.startswith("""transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", 
"""text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" ) if "ln_final" in name: snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" ) if "visual.conv1" in name: snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" ) if "visual.proj" in name: snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" ) if "text_projection" in name: snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" ) if "prompts_visual_ln" in name: snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": snake_case_ :str = name.replace("""positional""", """position""" ) if name.startswith("""mit.resblocks""" ): snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): 
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" ) return name def A_ ( _lowercase, _lowercase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ :Dict = orig_state_dict.pop(_lowercase ) if "attn.in_proj" in key: snake_case_ :Optional[Any] = key.split(""".""" ) if key.startswith("""visual""" ): snake_case_ :Any = key_split[3] snake_case_ :Optional[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ :str = val[ :dim, : ] snake_case_ :Optional[int] = val[ dim : dim * 2, : ] snake_case_ :Union[str, Any] = val[ -dim:, : ] else: snake_case_ :Dict = val[ :dim ] snake_case_ :Optional[int] = val[ dim : dim * 2 ] snake_case_ :Optional[int] = val[ -dim: ] else: if "weight" in key: snake_case_ :Optional[Any] = val[ :dim, : ] snake_case_ :List[str] = val[ dim : dim * 2, : ] snake_case_ :Dict = val[ -dim:, : ] else: snake_case_ :Union[str, Any] = val[:dim] snake_case_ :Union[str, Any] = val[ dim : dim * 2 ] snake_case_ :Union[str, Any] = val[-dim:] elif key.startswith("""mit""" ): snake_case_ :Tuple = key_split[2] snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ :Optional[int] = val[:dim, :] snake_case_ :Optional[int] = val[dim : dim * 2, :] snake_case_ :str = val[-dim:, :] else: snake_case_ :str = val[:dim] snake_case_ :Any = val[dim : dim * 2] snake_case_ :int = val[-dim:] else: snake_case_ :Tuple = key_split[2] snake_case_ :Any = config.text_config.hidden_size if "weight" in key: snake_case_ :Dict = val[:dim, :] snake_case_ :Dict = val[ dim : dim * 2, : ] snake_case_ :List[str] = val[-dim:, :] else: snake_case_ :Any = val[:dim] snake_case_ :Tuple = val[ dim : dim * 2 ] snake_case_ :List[str] = val[-dim:] else: snake_case_ :Optional[int] = rename_key(_lowercase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ :Optional[Any] = val.T snake_case_ :Tuple = 
val return orig_state_dict def A_ ( _lowercase ): '''simple docstring''' if num_frames == 8: snake_case_ :str = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: snake_case_ :int = """eating_spaghetti.npy""" elif num_frames == 32: snake_case_ :List[str] = """eating_spaghetti_32_frames.npy""" snake_case_ :int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", ) snake_case_ :Union[str, Any] = np.load(_lowercase ) return list(_lowercase ) def A_ ( _lowercase, _lowercase=None, _lowercase=False ): '''simple docstring''' snake_case_ :List[Any] = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": 
"""https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } snake_case_ :Optional[int] = model_to_url[model_name] snake_case_ :int = 8 if "16-frames" in model_name: snake_case_ :List[Any] = 16 elif "shot" in model_name: snake_case_ :Dict = 32 snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase ) snake_case_ :Optional[Any] = XCLIPModel(_lowercase ) model.eval() if "drive" in checkpoint_url: snake_case_ :List[str] = """pytorch_model.bin""" gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase ) snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""] else: snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""] snake_case_ :Union[str, Any] = 
convert_state_dict(_lowercase, _lowercase ) snake_case_ :str = XCLIPModel(_lowercase ) snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase ) snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase ) snake_case_ :Optional[int] = prepare_video(_lowercase ) snake_case_ :Optional[Any] = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase ) print("""Shape of pixel values:""", inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ :List[Any] = model(**_lowercase ) # Verify outputs snake_case_ :List[Any] = outputs.logits_per_video snake_case_ :Any = logits_per_video.softmax(dim=1 ) print("""Probs:""", _lowercase ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] ) elif model_name == "xclip-base-patch16": snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] ) elif model_name == "xclip-large-patch14": snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] ) else: raise ValueError(f"""Model name {model_name} not supported""" ) assert torch.allclose(_lowercase, _lowercase, atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) 
model.push_to_hub(_lowercase, organization="""nielsr""" ) processor.push_to_hub(_lowercase, organization="""nielsr""" ) slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __a = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
66
1
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[int] = torch.load(_lowercase, map_location="""cpu""" ) if "model" in sd.keys(): snake_case_ :str = torch.load(_lowercase, map_location="""cpu""" )["""model"""] # pop unnecessary weights snake_case_ :Tuple = [ """decoder.version""", """decoder.output_projection.weight""", ] for key in keys_to_delete: if key in sd: sd.pop(_lowercase ) snake_case_ :str = { """decoder.project_in_dim.weight""": """decoder.project_in.weight""", """decoder.project_out_dim.weight""": """decoder.project_out.weight""", """decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: snake_case_ :List[Any] = sd.pop(_lowercase ) snake_case_ :Tuple = list(sd.keys() ) for key in keys: if ".qkv_proj." 
in key: snake_case_ :Any = sd[key] # We split QKV in separate Q,K,V snake_case_ :Dict = key.replace(""".qkv_proj.""", """.q_proj.""" ) snake_case_ :Optional[Any] = key.replace(""".qkv_proj.""", """.k_proj.""" ) snake_case_ :Optional[Any] = key.replace(""".qkv_proj.""", """.v_proj.""" ) snake_case_ :Dict = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 snake_case_, snake_case_, snake_case_ :Any = torch.split(_lowercase, depth // 3, dim=0 ) snake_case_ :List[Any] = q snake_case_ :Union[str, Any] = k snake_case_ :Optional[int] = v del sd[key] return sd @torch.no_grad() def A_ ( _lowercase, _lowercase, _lowercase=None ): '''simple docstring''' snake_case_ :Optional[int] = load_checkpoint(_lowercase ) if config is not None: snake_case_ :List[str] = OPTConfig.from_pretrained(_lowercase ) else: snake_case_ :List[Any] = OPTConfig() snake_case_ :str = OPTModel(_lowercase ).half().eval() model.load_state_dict(_lowercase ) # Check results Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") __a = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
66
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :Any = seq_length snake_case_ :List[str] = is_training snake_case_ :Optional[Any] = use_attention_mask snake_case_ :Dict = use_token_type_ids snake_case_ :Union[str, Any] = use_labels snake_case_ :str = vocab_size snake_case_ :int = hidden_size snake_case_ :List[str] = num_hidden_layers snake_case_ :Dict = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Any = max_position_embeddings snake_case_ :Union[str, Any] = type_vocab_size snake_case_ :Optional[int] = type_sequence_label_size snake_case_ :Union[str, Any] = 
initializer_range snake_case_ :Tuple = num_choices def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_attention_mask: snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ :Any = None if self.use_token_type_ids: snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :str = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :int = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs snake_case_ :Union[str, Any] = True snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple 
docstring''' _A : List[str] = True _A : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = FlaxBertModelTester(self ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Dict: # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" ) snake_case_ :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
66
1
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) snake_case_ :Optional[Any] = str(bin(_lowercase ) )[2:] # remove the leading "0b" snake_case_ :int = str(bin(_lowercase ) )[2:] # remove the leading "0b" snake_case_ :Tuple = max(len(_lowercase ), len(_lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(_lowercase ), b_binary.zfill(_lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def lowerCAmelCase_ ( self: Tuple , snake_case: list[list[float]] , snake_case: list[int] ) -> int: snake_case_ :Any = 0.0 snake_case_ :Tuple = 0.0 for i in range(len(snake_case ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self: Optional[int] , snake_case: list[list[int | float]] , snake_case: list[int] , snake_case: int , snake_case: float ) -> list[list[int | float]]: for i in range(len(snake_case ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A_ ( ): '''simple docstring''' snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case_ :Optional[Any] = SelfOrganizingMap() snake_case_ :Dict = 3 snake_case_ :Dict = 0.5 for _ in range(_lowercase ): for j in range(len(_lowercase ) ): # training sample snake_case_ :List[Any] = training_samples[j] # Compute the winning vector snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase ) # Update the winning vector snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase ) # classify test sample snake_case_ :str = [0, 0, 0, 1] snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase ) # results print(f"""Clusters that the test sample belongs to : {winner}""" ) print(f"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ 
:List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 
** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : 
List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ 
:List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1
"""simple docstring""" from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowerCamelCase : '''simple docstring''' _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : int _A : int _A : float _A : float _A : Tuple[int] def lowerCAmelCase_ ( self: Optional[int] ) -> str: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def lowerCAmelCase_ ( self: str ) -> List[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def lowerCAmelCase_ ( self: Tuple ) -> torch.Tensor: snake_case_ :List[Any] = torch.arange(self.height * self.width ) snake_case_ :Union[str, Any] = torch.stack( [ pixel_indices % self.width, torch.div(snake_case , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case_, *snake_case_ :Dict = self.shape snake_case_ :Optional[int] = int(np.prod(snake_case ) ) snake_case_ :Union[str, Any] = self.get_image_coords() snake_case_ :Optional[Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) snake_case_ :Optional[int] = self.get_camera_rays(snake_case ) snake_case_ :Optional[int] = rays.view(snake_case , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def lowerCAmelCase_ ( self: Tuple , snake_case: torch.Tensor ) -> torch.Tensor: snake_case_, *snake_case_, snake_case_ :Tuple = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] snake_case_ :Dict = 
coords.view(snake_case , -1 , 2 ) snake_case_ :str = self.resolution() snake_case_ :List[str] = self.fov() snake_case_ :Tuple = (flat.float() / (res - 1)) * 2 - 1 snake_case_ :Any = fracs * torch.tan(fov / 2 ) snake_case_ :Optional[int] = fracs.view(snake_case , -1 , 2 ) snake_case_ :Dict = ( self.z.view(snake_case , 1 , 3 ) + self.x.view(snake_case , 1 , 3 ) * fracs[:, :, :1] + self.y.view(snake_case , 1 , 3 ) * fracs[:, :, 1:] ) snake_case_ :Tuple = directions / directions.norm(dim=-1 , keepdim=snake_case ) snake_case_ :Optional[Any] = torch.stack( [ torch.broadcast_to(self.origin.view(snake_case , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(snake_case , *snake_case , 2 , 3 ) def lowerCAmelCase_ ( self: List[str] , snake_case: int , snake_case: int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=snake_case , height=snake_case , x_fov=self.x_fov , y_fov=self.y_fov , ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = [] snake_case_ :Optional[Any] = [] snake_case_ :Optional[Any] = [] snake_case_ :Any = [] for theta in np.linspace(0, 2 * np.pi, num=20 ): snake_case_ :Optional[int] = np.array([np.sin(_lowercase ), np.cos(_lowercase ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) snake_case_ :List[Any] = -z * 4 snake_case_ :str = np.array([np.cos(_lowercase ), -np.sin(_lowercase ), 0.0] ) snake_case_ :Tuple = np.cross(_lowercase, _lowercase ) origins.append(_lowercase ) xs.append(_lowercase ) ys.append(_lowercase ) zs.append(_lowercase ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(_lowercase, axis=0 ) ).float(), x=torch.from_numpy(np.stack(_lowercase, axis=0 ) ).float(), y=torch.from_numpy(np.stack(_lowercase, axis=0 ) ).float(), z=torch.from_numpy(np.stack(_lowercase, axis=0 ) ).float(), width=_lowercase, 
height=_lowercase, x_fov=0.7, y_fov=0.7, shape=(1, len(_lowercase )), )
66
"""Validation of Sri Lankan mobile phone numbers."""
import re


def A_(_lowercase):
    """Return True if ``_lowercase`` is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7X``
    where X is an assigned operator digit (3 and 9 are not assigned), an
    optional single space or hyphen, and seven subscriber digits.
    """
    snake_case_ = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # leading zero or country code
        r"7(0|1|2|4|5|6|7|8)"  # assigned operator digit
        r"(-| |)"  # optional single separator
        r"\d{7}$"  # seven subscriber digits
    )
    # Bug fix: search with the compiled pattern.  The original called
    # ``re.search(_lowercase, _lowercase)``, using the input string as its
    # own regex pattern, which matched almost any input against itself.
    return bool(snake_case_.search(_lowercase))


if __name__ == "__main__":
    __a = "0094702343221"
    # Bug fix: the original printed ``is_sri_lankan_phone_number(phone)``,
    # two names never defined in this module.
    print(A_(__a))
66
1
"""XNOR logic gate with an exhaustive self-test."""


def A_(input_a, input_b):
    """Return 1 if both inputs are equal (XNOR), otherwise 0.

    Bug fixes versus the original: the original declared the same parameter
    name twice (a SyntaxError in Python) and compared a value with itself
    (``input_a == input_a``), which is always true.
    """
    return 1 if input_a == input_b else 0


# Name used by the self-test and the demo below; the original referenced an
# undefined ``xnor_gate`` and shadowed the gate with a second ``A_``.
xnor_gate = A_


def test_xnor_gate():
    """Exhaustively verify the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
66
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __a = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def A_ ( _lowercase ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :Tuple = False elif args.student_type == "gpt2": snake_case_ :Union[str, Any] = False 
def A_ ( _lowercase, _lowercase ): '''simple docstring''' if args.student_type == "roberta": snake_case_ :List[str] = False def A_ ( ): '''simple docstring''' snake_case_ :Union[str, Any] = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""", action="""store_true""", help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""", type=_lowercase, required=_lowercase, help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""", type=_lowercase, required=_lowercase, help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""", ) parser.add_argument( """--student_type""", type=_lowercase, choices=["""distilbert""", """roberta""", """gpt2"""], required=_lowercase, help="""The student type (DistilBERT, RoBERTa).""", ) parser.add_argument("""--student_config""", type=_lowercase, required=_lowercase, help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""", default=_lowercase, type=_lowercase, help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""", choices=["""bert""", """roberta""", """gpt2"""], required=_lowercase, help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""", type=_lowercase, required=_lowercase, help="""The teacher model.""" ) parser.add_argument("""--temperature""", default=2.0, type=_lowercase, help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""", default=0.5, type=_lowercase, help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""", default=0.0, type=_lowercase, help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""", ) parser.add_argument("""--alpha_clm""", default=0.5, type=_lowercase, help="""Linear weight for the CLM loss. 
Must be >=0.""" ) parser.add_argument("""--alpha_mse""", default=0.0, type=_lowercase, help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""", default=0.0, type=_lowercase, help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""", action="""store_true""", help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""", default=0.15, type=_lowercase, help="""Proportion of tokens for which we need to make a prediction.""", ) parser.add_argument("""--word_mask""", default=0.8, type=_lowercase, help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""", default=0.1, type=_lowercase, help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""", default=0.1, type=_lowercase, help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""", default=0.7, type=_lowercase, help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""", ) parser.add_argument("""--token_counts""", type=_lowercase, help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""", action="""store_true""", help="""If true, compute the distillation loss only the [MLM] prediction distribution.""", ) parser.add_argument( """--freeze_pos_embs""", action="""store_true""", help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""", ) parser.add_argument( """--freeze_token_type_embds""", action="""store_true""", help="""Freeze token type embeddings during distillation if existent. 
For student_type in ['roberta'] only.""", ) parser.add_argument("""--n_epoch""", type=_lowercase, default=3, help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""", type=_lowercase, default=5, help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""", action="""store_false""", help="""If true, group sequences that have similar length into the same batch. Default is true.""", ) parser.add_argument( """--gradient_accumulation_steps""", type=_lowercase, default=50, help="""Gradient accumulation for larger training batches.""", ) parser.add_argument("""--warmup_prop""", default=0.05, type=_lowercase, help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""", default=0.0, type=_lowercase, help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""", default=5e-4, type=_lowercase, help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""", default=1e-6, type=_lowercase, help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""", default=5.0, type=_lowercase, help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""", default=0.02, type=_lowercase, help="""Random initialization range.""" ) parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""", ) parser.add_argument( """--fp16_opt_level""", type=_lowercase, default="""O1""", help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ), ) parser.add_argument("""--n_gpu""", type=_lowercase, default=1, help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""", type=_lowercase, default=-1, help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""", type=_lowercase, default=56, help="""Random seed""" ) 
parser.add_argument("""--log_interval""", type=_lowercase, default=500, help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""", type=_lowercase, default=4000, help="""Checkpoint interval.""" ) snake_case_ :Tuple = parser.parse_args() sanity_checks(_lowercase ) # ARGS # init_gpu_params(_lowercase ) set_seed(_lowercase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path, """parameters.json""" ), """w""" ) as f: json.dump(vars(_lowercase ), _lowercase, indent=4 ) git_log(args.dump_path ) snake_case_, snake_case_, snake_case_ :Any = MODEL_CLASSES[args.student_type] snake_case_, snake_case_, snake_case_ :int = MODEL_CLASSES[args.teacher_type] # TOKENIZER # snake_case_ :Any = teacher_tokenizer_class.from_pretrained(args.teacher_name ) snake_case_ :Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): snake_case_ :Union[str, Any] = tokenizer.all_special_tokens.index(_lowercase ) snake_case_ :Union[str, Any] = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) snake_case_ :str = special_tok_ids snake_case_ :Any = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file, """rb""" ) as fp: snake_case_ :str = pickle.load(_lowercase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts, """rb""" ) as fp: snake_case_ :Optional[Any] = 
pickle.load(_lowercase ) snake_case_ :Tuple = np.maximum(_lowercase, 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): snake_case_ :Optional[int] = 0.0 # do not predict special tokens snake_case_ :int = torch.from_numpy(_lowercase ) else: snake_case_ :List[str] = None snake_case_ :Optional[int] = LmSeqsDataset(params=_lowercase, data=_lowercase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) snake_case_ :List[Any] = student_config_class.from_pretrained(args.student_config ) snake_case_ :Union[str, Any] = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) snake_case_ :List[str] = student_model_class.from_pretrained(args.student_pretrained_weights, config=_lowercase ) else: snake_case_ :Optional[int] = student_model_class(_lowercase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # snake_case_ :Dict = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=_lowercase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowercase, _lowercase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowercase, _lowercase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() snake_case_ :Optional[int] = Distiller( params=_lowercase, dataset=_lowercase, token_probs=_lowercase, student=_lowercase, teacher=_lowercase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if 
__name__ == "__main__": main()
66
1
"""Validation of Sri Lankan mobile phone numbers."""
import re


def A_(_lowercase):
    """Return True if ``_lowercase`` is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7X``
    where X is an assigned operator digit (3 and 9 are not assigned), an
    optional single space or hyphen, and seven subscriber digits.
    """
    snake_case_ = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # leading zero or country code
        r"7(0|1|2|4|5|6|7|8)"  # assigned operator digit
        r"(-| |)"  # optional single separator
        r"\d{7}$"  # seven subscriber digits
    )
    # Bug fix: search with the compiled pattern.  The original called
    # ``re.search(_lowercase, _lowercase)``, using the input string as its
    # own regex pattern, which matched almost any input against itself.
    return bool(snake_case_.search(_lowercase))


if __name__ == "__main__":
    __a = "0094702343221"
    # Bug fix: the original printed ``is_sri_lankan_phone_number(phone)``,
    # two names never defined in this module.
    print(A_(__a))
66
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: Any ) -> str: if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[Any]: # configuration for running training on smdistributed Model Parallel snake_case_ :Tuple = { """enabled""": True, """processes_per_host""": 8, } snake_case_ :List[Any] = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } snake_case_ :Tuple = {"""smdistributed""": {"""modelparallel""": 
smp_options}, """mpi""": mpi_options} snake_case_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version="""py36""" , ) def lowerCAmelCase_ ( self: Any , snake_case: Tuple ) -> List[str]: TrainingJobAnalytics(snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self: Dict , snake_case: Dict ) -> List[Any]: # create estimator snake_case_ :List[Any] = self.create_estimator(snake_case ) # run training estimator.fit() # result dataframe snake_case_ :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case_ :Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) snake_case_ :Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case_ :int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": 
train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case )
66
1
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness __a = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" __a = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" __a = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. 
Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" __a = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" __a = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: Tuple , snake_case: Optional[int]=[1, 10, 100] , snake_case: Dict=4 , snake_case: List[str]=3.0 ) -> List[Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=snake_case ) as executor: snake_case_ :Union[str, Any] = [] snake_case_ :Optional[Any] = Counter() snake_case_ :List[Any] = 0 snake_case_ :Optional[Any] = defaultdict(snake_case ) for task_id, (candidates, test_case) in enumerate(zip(snake_case , snake_case ) ): for candidate in candidates: snake_case_ :Dict = candidate + """\n""" + test_case snake_case_ :List[Any] = (test_program, timeout, task_id, completion_id[task_id]) snake_case_ :List[Any] = executor.submit(snake_case , *snake_case ) futures.append(snake_case ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(snake_case ): snake_case_ :Any = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) snake_case_, snake_case_ :Union[str, Any] = [], [] for result in results.values(): result.sort() snake_case_ :Dict = [r[1]["""passed"""] for r in result] total.append(len(snake_case ) ) correct.append(sum(snake_case ) ) snake_case_ :Union[str, Any] = np.array(snake_case ) snake_case_ :Any = np.array(snake_case ) snake_case_ :int = k snake_case_ :str = {f"""pass@{k}""": estimate_pass_at_k(snake_case , snake_case , 
snake_case ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' def estimator(_lowercase, _lowercase, _lowercase ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) ) if isinstance(_lowercase, _lowercase ): snake_case_ :List[Any] = itertools.repeat(_lowercase, len(_lowercase ) ) else: assert len(_lowercase ) == len(_lowercase ) snake_case_ :Optional[int] = iter(_lowercase ) return np.array([estimator(int(_lowercase ), int(_lowercase ), _lowercase ) for n, c in zip(_lowercase, _lowercase )] )
66
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : '''simple docstring''' def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict: snake_case_ :Dict = parent snake_case_ :List[Any] = batch_size snake_case_ :Dict = image_size snake_case_ :Dict = patch_size snake_case_ :Tuple = num_channels snake_case_ :List[Any] = embed_dim snake_case_ :List[str] = depths snake_case_ :str = num_heads snake_case_ :Tuple = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :int = qkv_bias snake_case_ :Tuple = hidden_dropout_prob snake_case_ :Optional[Any] = 
attention_probs_dropout_prob snake_case_ :Dict = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Any = use_absolute_embeddings snake_case_ :int = patch_norm snake_case_ :List[Any] = layer_norm_eps snake_case_ :Tuple = initializer_range snake_case_ :str = is_training snake_case_ :int = scope snake_case_ :Tuple = use_labels snake_case_ :Tuple = type_sequence_label_size snake_case_ :str = encoder_stride snake_case_ :List[Any] = out_features snake_case_ :str = out_indices def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Union[str, Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: int ) -> Optional[Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any: snake_case_ :Dict = MaskFormerSwinModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ 
:Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]: snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[Any] = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(snake_case ): snake_case_ :Optional[Any] = ["""stem"""] snake_case_ :str = MaskFormerSwinBackbone(config=snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_ :Optional[int] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :str = config_and_inputs snake_case_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Union[str, Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {} _A : List[str] = False _A : Any = False _A : Dict = False _A : List[Any] = False _A : Optional[int] = False def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case_ :str = MaskFormerSwinModelTester(self ) snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with""" """ 
`nn.DataParallel`""" ) ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Any ) -> Tuple: return def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) @unittest.skip("""Swin does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: str ) -> List[str]: pass @unittest.skip("""Swin does not support feedforward chunking""" ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = 
[*signature.parameters.keys()] snake_case_ :str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: pass @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str: snake_case_ :List[str] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :Any = outputs.hidden_states snake_case_ :Optional[int] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swin has a different seq_length snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[Any] = True 
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[Any] = 3 snake_case_ :List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Any = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: List[str] ) -> str: pass @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" ) def lowerCAmelCase_ ( self: str ) -> List[Any]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(snake_case: str ): snake_case_ :Optional[int] = 0 return t def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ): with torch.no_grad(): snake_case_ 
:List[Any] = model(**snake_case , return_dict=snake_case , **snake_case ) snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple() def recursive_check(snake_case: List[Any] , snake_case: int ): if isinstance(snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ): recursive_check(snake_case , snake_case ) elif isinstance(snake_case , snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(snake_case , snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=( """Tuple and dict output are not equal. Difference:""" f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has""" f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}.""" ) , ) recursive_check(snake_case , snake_case ) for model_class in self.all_model_classes: snake_case_ :int = model_class(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case ) snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case ) snake_case_ :Any = self._prepare_for_class(snake_case , snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , 
return_labels=snake_case ) snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} ) @require_torch class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ): '''simple docstring''' _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else () _A : Tuple = MaskFormerSwinConfig def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self ) def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0] for backbone_class in self.all_model_classes: snake_case_ :List[str] = backbone_class(snake_case ) backbone.to(snake_case ) backbone.eval() snake_case_ :List[Any] = backbone(**snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if 
self.has_attentions: snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case ) self.assertIsNotNone(outputs.attentions )
66
1
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def A_ ( ): '''simple docstring''' snake_case_, snake_case_ :Tuple = 9, 14 # noqa: F841 snake_case_ :Optional[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] snake_case_ :Any = defaultdict(_lowercase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) snake_case_ :Union[str, Any] = mst(_lowercase ) snake_case_ :List[Any] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: snake_case_ :Optional[Any] = tuple(answer[:2] ) snake_case_ :Dict = tuple(edge[::-1] ) assert edge in result or reverse in result
66
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __a = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> Tuple: snake_case_ :List[str] = 4 snake_case_ :Tuple = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: List[str] ) -> Dict: return (3, 32, 32) @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (3, 32, 32) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } snake_case_ :Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[str] = UNetaDModel _A : Union[str, Any] = """sample""" @property def lowerCAmelCase_ ( self: str ) -> str: snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 4 snake_case_ :int = (32, 32) snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case ) return {"sample": noise, "timestep": 
time_step} @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return (4, 32, 32) @property def lowerCAmelCase_ ( self: List[Any] ) -> int: return (4, 32, 32) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: snake_case_ :Dict = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } snake_case_ :List[str] = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :List[str] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: Tuple ) -> Dict: snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model.to(snake_case ) snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self: str ) -> Any: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case ) model_accelerate.to(snake_case ) model_accelerate.eval() snake_case_ :List[Any] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , 
model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case ) snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() snake_case_, snake_case_ :str = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case ) model_normal_load.to(snake_case ) model_normal_load.eval() snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""] assert torch_all_close(snake_case , snake_case , rtol=1E-3 ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(snake_case ) snake_case_ :Optional[int] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ :int = noise.to(snake_case ) snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case ) with torch.no_grad(): snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) ) class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : List[Any] = UNetaDModel _A : List[Any] = """sample""" @property def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple: snake_case_ :Union[str, Any] = 4 snake_case_ :Any = 3 snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes 
).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: return (3, 32, 32) @property def lowerCAmelCase_ ( self: int ) -> Tuple: return (3, 32, 32) def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case_ :List[Any] = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1E-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } snake_case_ :int = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case ) self.assertIsNotNone(snake_case ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(snake_case ) snake_case_ :Any = self.dummy_input snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case ) snake_case_ :int = noise snake_case_ :int = model(**snake_case ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self: str ) -> Dict: snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(snake_case ) snake_case_ :List[str] = 4 snake_case_ :Optional[int] = 3 snake_case_ :List[str] = (256, 256) snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :Dict = model(snake_case , snake_case ).sample snake_case_ 
:int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(snake_case ) snake_case_ :Optional[int] = 4 snake_case_ :Optional[Any] = 3 snake_case_ :Optional[Any] = (32, 32) snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case ) snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case ) with torch.no_grad(): snake_case_ :str = model(snake_case , snake_case ).sample snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: # not required for this model pass
66
1
"""simple docstring""" def A_ ( _lowercase, _lowercase ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) snake_case_ :Union[str, Any] = str(bin(_lowercase ) ) binary_number += "0" * shift_amount return binary_number def A_ ( _lowercase, _lowercase ): '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) snake_case_ :Optional[int] = str(bin(_lowercase ) )[2:] if shift_amount >= len(_lowercase ): return "0b0" snake_case_ :Optional[int] = binary_number[: len(_lowercase ) - shift_amount] return "0b" + shifted_binary_number def A_ ( _lowercase, _lowercase ): '''simple docstring''' if number >= 0: # Get binary representation of positive number snake_case_ :Any = """0""" + str(bin(_lowercase ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number snake_case_ :Dict = len(bin(_lowercase )[3:] ) # Find 2's complement of number snake_case_ :Optional[int] = bin(abs(_lowercase ) - (1 << binary_number_length) )[3:] snake_case_ :Any = ( """1""" + """0""" * (binary_number_length - len(_lowercase )) + binary_number ) if shift_amount >= len(_lowercase ): return "0b" + binary_number[0] * len(_lowercase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(_lowercase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
1
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[int] = FunnelTokenizer _A : Tuple = FunnelTokenizerFast _A : Any = True _A : str = True def lowerCAmelCase_ ( self: Any ) -> List[Any]: super().setUp() snake_case_ :List[Any] = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def lowerCAmelCase_ ( self: Union[str, Any] , **snake_case: Optional[Any] ) -> Union[str, Any]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: List[str] , **snake_case: int ) -> Optional[Any]: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def lowerCAmelCase_ ( self: List[Any] , snake_case: List[str] ) -> List[Any]: snake_case_ :List[str] = """UNwant\u00E9d,running""" snake_case_ :Optional[Any] = """unwanted, running""" return input_text, output_text def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case_ :List[Any] = self.tokenizer_class(self.vocab_file ) snake_case_ :str = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [7, 4, 5, 10, 8, 9] ) def lowerCAmelCase_ ( self: List[Any] ) -> Tuple: snake_case_ :Tuple = 
self.get_tokenizers(do_lower_case=snake_case ) for tokenizer in tokenizers: snake_case_ :List[str] = tokenizer("""UNwant\u00E9d,running""" ) snake_case_ :int = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) snake_case_ :int = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
66
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = StableDiffusionSAGPipeline _A : Optional[Any] = TEXT_TO_IMAGE_PARAMS _A : Any = TEXT_TO_IMAGE_BATCH_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS _A : List[str] = False def lowerCAmelCase_ ( self: Optional[Any] ) -> str: torch.manual_seed(0 ) snake_case_ :Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) snake_case_ :Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) snake_case_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1_000 , ) snake_case_ :Tuple = CLIPTextModel(snake_case ) snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ :Dict = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str: if str(snake_case ).startswith("""mps""" ): snake_case_ :Tuple = torch.manual_seed(snake_case ) else: snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ :Any = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self: Optional[int] ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: int ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Union[str, Any] = """.""" snake_case_ :str = torch.manual_seed(0 ) snake_case_ :str = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :List[Any] = output.images snake_case_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: Dict ) -> str: snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :Optional[int] = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Union[str, Any] = torch.manual_seed(0 ) snake_case_ :Tuple = sag_pipe( [prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) snake_case_ :Optional[int] = output.images snake_case_ :Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) snake_case_ :int = sag_pipe.to(snake_case ) sag_pipe.set_progress_bar_config(disable=snake_case ) snake_case_ :Tuple = """.""" snake_case_ :Optional[int] = torch.manual_seed(0 ) snake_case_ :List[str] = sag_pipe( [prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) snake_case_ :Optional[Any] = output.images assert image.shape == (1, 512, 768, 3)
66
1
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(_lowercase ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
66
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class lowerCamelCase : '''simple docstring''' def __init__( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {} def lowerCAmelCase_ ( self: Dict , snake_case: str ) -> None: snake_case_ :str = {} def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: str , snake_case: float ) -> None: if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ :Dict = probability def lowerCAmelCase_ ( self: List[Any] ) -> list[str]: return list(self.connections ) def lowerCAmelCase_ ( self: Any , snake_case: str ) -> str: snake_case_ :Optional[Any] = 0 snake_case_ :List[str] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[str] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowercase, _lowercase, _lowercase ) snake_case_ :int = Counter(graph.get_nodes() ) snake_case_ :Optional[Any] = start for _ in range(_lowercase ): snake_case_ :Tuple = graph.transition(_lowercase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __a = Lock() def A_ ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ): '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_lowercase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() snake_case_ :Optional[Any] = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left snake_case_ :List[str] = min(_lowercase, _lowercase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_lowercase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() snake_case_ :Optional[Any] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right snake_case_ :Any = max(_lowercase, _lowercase ) # after all swaps are performed, send the values back to main result_pipe[1].send(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = [] snake_case_ :Any = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop snake_case_ :Any = Pipe() snake_case_ :Dict = Pipe() process_array_.append( Process( target=_lowercase, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) ) snake_case_ :Optional[int] = temp_rs snake_case_ :str = temp_rr for i in range(1, 
len(_lowercase ) - 1 ): snake_case_ :List[str] = Pipe() snake_case_ :str = Pipe() process_array_.append( Process( target=_lowercase, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) ) snake_case_ :int = temp_rs snake_case_ :List[Any] = temp_rr process_array_.append( Process( target=_lowercase, args=( len(_lowercase ) - 1, arr[len(_lowercase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_lowercase ) - 1], ), ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0, len(_lowercase ) ): snake_case_ :int = result_pipe[p][0].recv() process_array_[p].join() return arr def A_ ( ): '''simple docstring''' snake_case_ :Any = list(range(10, 0, -1 ) ) print("""Initial List""" ) print(*_lowercase ) snake_case_ :Union[str, Any] = odd_even_transposition(_lowercase ) print("""Sorted List\n""" ) print(*_lowercase ) if __name__ == "__main__": main()
66
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __a = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __a = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", 
"MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", 
_lowercase ) return [m.group(0 ) for m in matches] def A_ ( ): '''simple docstring''' snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case_ :Dict = { config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case_ :Optional[Any] = collections.defaultdict(_lowercase ) snake_case_ :int = collections.defaultdict(_lowercase ) snake_case_ :List[str] = collections.defaultdict(_lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(_lowercase ): snake_case_ :int = None if _re_tf_models.match(_lowercase ) is not None: snake_case_ :int = tf_models snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0] elif _re_flax_models.match(_lowercase ) is not None: snake_case_ :List[Any] = flax_models snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0] elif _re_pt_models.match(_lowercase ) is not None: snake_case_ :Optional[Any] = pt_models snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0] if lookup_dict is not None: while len(_lowercase ) > 0: if attr_name in model_prefix_to_model_type: snake_case_ :Optional[int] = True break # Try again after removing the last word in the name snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] ) snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case_ :Optional[Any] = list(_lowercase ) all_models.sort() snake_case_ :Optional[int] = {"""model_type""": all_models} snake_case_ :Optional[int] = [pt_models[t] for t in all_models] snake_case_ :Any = [tf_models[t] for t in all_models] snake_case_ :Dict = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case_ :Dict = {} for t in all_models: if t in 
transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case_ :Optional[Any] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case_ :Tuple = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case_ :Tuple = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. snake_case_ :str = """AutoTokenizer""" snake_case_ :int = [processors[t] for t in all_models] return pd.DataFrame(_lowercase ) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] snake_case_ :List[str] = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ): # The type of pipeline may not exist in this framework if not hasattr(_lowercase, _lowercase ): continue # First extract all model_names snake_case_ :Tuple = [] for name in getattr(_lowercase, _lowercase ).values(): if isinstance(_lowercase, _lowercase ): model_names.append(_lowercase ) else: model_names.extend(list(_lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :List[Any] = get_frameworks_table() snake_case_ :str = Dataset.from_pandas(_lowercase ) snake_case_ :List[Any] = hf_hub_download( """huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase ) 
snake_case_ :List[str] = Dataset.from_json(_lowercase ) snake_case_ :int = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(_lowercase ) ) } snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case_ :Tuple = sorted(table.keys() ) snake_case_ :Tuple = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case_ :Union[str, Any] = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: snake_case_ :List[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, ) def A_ ( ): '''simple docstring''' snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS snake_case_ :List[str] = [] for key in pipeline_tasks: if key not in in_table: snake_case_ :int = pipeline_tasks[key]["""pt"""] if isinstance(_lowercase, (list, tuple) ): snake_case_ :Any = model[0] snake_case_ :str = model.__name__ if model not in in_table.values(): missing.append(_lowercase ) if len(_lowercase ) > 0: snake_case_ :Optional[int] = """, """.join(_lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f"""`utils/update_metadata.py`: 
{msg}. Please add them!""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __a = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
66
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable __a = { "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoXJapaneseForCausalLM", "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __a = logging.getLogger(__name__) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Union[str, Any] = """token-classification""" def __init__( self: Any , snake_case: Tuple ) -> List[Any]: if type(snake_case ) == dict: snake_case_ :Optional[int] = Namespace(**snake_case ) snake_case_ :Optional[int] = import_module("""tasks""" ) try: snake_case_ :Any = getattr(snake_case , hparams.task_type ) snake_case_ :TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) snake_case_ :Any = self.token_classification_task.get_labels(hparams.labels ) snake_case_ :str = CrossEntropyLoss().ignore_index super().__init__(snake_case , len(self.labels ) , self.mode ) def lowerCAmelCase_ ( self: Dict , **snake_case: List[Any] ) -> Any: return self.model(**snake_case ) def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: List[Any] ) -> Optional[int]: snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :List[str] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Optional[Any] = self(**snake_case ) snake_case_ :List[str] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_ :List[Any] = self.hparams for mode in ["train", "dev", "test"]: snake_case_ :Optional[int] = self._feature_file(snake_case ) if os.path.exists(snake_case ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :Optional[int] = torch.load(snake_case ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) snake_case_ :Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case ) snake_case_ :Any = self.token_classification_task.convert_examples_to_features( snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , 
pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , snake_case ) torch.save(snake_case , snake_case ) def lowerCAmelCase_ ( self: Optional[int] , snake_case: int , snake_case: int , snake_case: bool = False ) -> DataLoader: snake_case_ :int = self._feature_file(snake_case ) logger.info("""Loading features from cached file %s""" , snake_case ) snake_case_ :str = torch.load(snake_case ) snake_case_ :Dict = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) snake_case_ :str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case_ :List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case_ :List[str] = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case_ :Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Dict , snake_case: Union[str, Any] ) -> List[str]: """Compute validation""" "" snake_case_ :List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": snake_case_ :Dict = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case_ :Dict = self(**snake_case ) snake_case_, snake_case_ :Dict = outputs[:2] snake_case_ :Union[str, Any] = logits.detach().cpu().numpy() snake_case_ :List[Any] = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase_ ( self: List[Any] , snake_case: int ) -> Tuple: snake_case_ :Union[str, 
Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean() snake_case_ :Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) snake_case_ :Tuple = np.argmax(snake_case , axis=2 ) snake_case_ :List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) snake_case_ :Optional[Any] = dict(enumerate(self.labels ) ) snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] snake_case_ :Dict = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case_ :str = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(snake_case , snake_case ), """precision""": precision_score(snake_case , snake_case ), """recall""": recall_score(snake_case , snake_case ), """f1""": fa_score(snake_case , snake_case ), } snake_case_ :List[Any] = dict(results.items() ) snake_case_ :Union[str, Any] = results return ret, preds_list, out_label_list def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Dict ) -> Optional[Any]: # when stable snake_case_, snake_case_, snake_case_ :Tuple = self._eval_end(snake_case ) snake_case_ :str = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase_ ( self: Tuple , snake_case: Optional[int] ) -> Any: # updating to test_epoch_end instead of deprecated test_end snake_case_, snake_case_, snake_case_ :Any = self._eval_end(snake_case ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case_ :Optional[int] = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase_ ( 
snake_case: Any , snake_case: int ) -> Dict: # Add NER specific options BaseTransformer.add_model_specific_args(snake_case , snake_case ) parser.add_argument( """--task_type""" , default="""NER""" , type=snake_case , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=snake_case , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=snake_case , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __a = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __a = NERTransformer.add_model_specific_args(parser, os.getcwd()) __a = parser.parse_args() __a = NERTransformer(args) __a = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __a = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
66
1
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. __a = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n" class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case_ :Optional[int] = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) ) snake_case_ :Any = self.transformer_dir shutil.copy( os.path.join(snake_case , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , ) def lowerCAmelCase_ ( self: Optional[int] ) -> str: snake_case_ :Union[str, Any] = """src/transformers""" shutil.rmtree(self.transformer_dir ) def lowerCAmelCase_ ( self: str , snake_case: Tuple , snake_case: int , snake_case: List[Any] , snake_case: Dict=None ) -> str: snake_case_ :Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: snake_case_ :Tuple = comment 
+ f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result snake_case_ :List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) snake_case_ :List[Any] = black.format_str(snake_case , mode=snake_case ) snake_case_ :List[Any] = os.path.join(self.transformer_dir , """new_code.py""" ) with open(snake_case , """w""" , newline="""\n""" ) as f: f.write(snake_case ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(snake_case ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=snake_case ) with open(snake_case , """r""" ) as f: self.assertTrue(f.read() , snake_case ) def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :int = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" ) self.assertEqual(snake_case , snake_case ) def lowerCAmelCase_ ( self: int ) -> str: # Base copy consistency self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , snake_case , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , snake_case ) , ) # Copy consistency with a really long name snake_case_ :str = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason""" self.check_copy_consistency( f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub("""Bert""" , snake_case , snake_case ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , snake_case , overwrite_result=re.sub("""Bert""" , """TestModel""" , snake_case ) , ) def lowerCAmelCase_ ( self: str ) -> List[str]: snake_case_ :int = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""] snake_case_ :Union[str, Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),""" """ released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**""" """ (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders""" """ as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang""" """ Luong, Quoc V. Le, Christopher D. Manning.""" ) snake_case_ :Optional[int] = ( """1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) snake_case_ :Optional[Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.""" """ **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文""" """ [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and""" """ lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same""" """ method has been applied to compress GPT2 into""" """ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into""" """ [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),""" """ Multilingual BERT into""" """ [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German""" """ version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自""" """ Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather""" """ than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,""" """ Christopher D. 
Manning 发布。\n""" ) snake_case_, snake_case_ :Tuple = check_copies.convert_to_localized_md( snake_case , snake_case , localized_readme["""format_model_list"""] ) self.assertFalse(snake_case ) self.assertEqual(snake_case , snake_case ) snake_case_, snake_case_ :Optional[Any] = check_copies.convert_to_localized_md( snake_case , snake_case , localized_readme["""format_model_list"""] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(snake_case ) snake_case_ :Optional[Any] = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the""" """ Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for""" """ Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong""" """ Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.""" ) snake_case_ :Any = ( """1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and""" """ the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) snake_case_ :Any = ( """1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the""" """ Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of""" """ Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian""" """ Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n""" ) snake_case_, snake_case_ :Optional[Any] = check_copies.convert_to_localized_md( snake_case , snake_case , localized_readme["""format_model_list"""] ) # Check if the model link is synchronized. self.assertEqual(snake_case , snake_case )
66
"""simple docstring""" from math import factorial class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple: snake_case_ :List[Any] = real if isinstance(snake_case , snake_case ): snake_case_ :Tuple = [1] * rank else: snake_case_ :Optional[Any] = rank def __repr__( self: List[str] ) -> Tuple: return ( f"""{self.real}+""" f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}""" ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: snake_case_ :Any = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case ) def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]: if not isinstance(snake_case , snake_case ): return Dual(self.real + other , self.duals ) snake_case_ :List[Any] = self.duals.copy() snake_case_ :Tuple = other.duals.copy() if len(snake_case ) > len(snake_case ): o_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) elif len(snake_case ) < len(snake_case ): s_dual.extend([1] * (len(snake_case ) - len(snake_case )) ) snake_case_ :Dict = [] for i in range(len(snake_case ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case ) _A : str = __add__ def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple: return self + other * -1 def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]: if not isinstance(snake_case , snake_case ): snake_case_ :Dict = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case ) snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case ) _A : int = 
__mul__ def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case ) raise ValueError def __floordiv__( self: int , snake_case: List[Any] ) -> Any: if not isinstance(snake_case , snake_case ): snake_case_ :Optional[int] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case ) raise ValueError def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]: if n < 0 or isinstance(snake_case , snake_case ): raise ValueError("""power must be a positive integer""" ) if n == 0: return 1 if n == 1: return self snake_case_ :str = self for _ in range(n - 1 ): x *= self return x def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if not callable(_lowercase ): raise ValueError("""differentiate() requires a function as input for func""" ) if not isinstance(_lowercase, (float, int) ): raise ValueError("""differentiate() requires a float as input for position""" ) if not isinstance(_lowercase, _lowercase ): raise ValueError("""differentiate() requires an int as input for order""" ) snake_case_ :Optional[Any] = Dual(_lowercase, 1 ) snake_case_ :List[Any] = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def A_ ( _lowercase ): '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
66
1
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' snake_case_ :int = len(_lowercase ) snake_case_ :List[Any] = len(matrix[0] ) snake_case_ :Optional[int] = min(_lowercase, _lowercase ) for row in range(_lowercase ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1, _lowercase ): snake_case_ :int = matrix[col][row] / matrix[row][row] for i in range(_lowercase, _lowercase ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows snake_case_ :Any = True for i in range(row + 1, _lowercase ): if matrix[i][row] != 0: snake_case_, snake_case_ :int = matrix[i], matrix[row] snake_case_ :Dict = False break if reduce: rank -= 1 for i in range(_lowercase ): snake_case_ :Dict = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
66
"""simple docstring""" from __future__ import annotations __a = 10 def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = 1 snake_case_ :List[str] = max(_lowercase ) while placement <= max_digit: # declare and initialize empty buckets snake_case_ :list[list] = [[] for _ in range(_lowercase )] # split list_of_ints between the buckets for i in list_of_ints: snake_case_ :Any = int((i / placement) % RADIX ) buckets[tmp].append(_lowercase ) # put each buckets' contents into list_of_ints snake_case_ :Optional[Any] = 0 for b in range(_lowercase ): for i in buckets[b]: snake_case_ :Union[str, Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
66
1
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def A_ ( _lowercase ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = np.max(_outputs, axis=-1, keepdims=_lowercase ) snake_case_ :Optional[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=_lowercase ) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : List[str] = """sigmoid""" _A : Union[str, Any] = """softmax""" _A : Tuple = """none""" @add_end_docstrings( _lowerCAmelCase , R""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. 
""" , ) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Optional[int] = False _A : Any = ClassificationFunction.NONE def __init__( self: Tuple , **snake_case: Any ) -> List[Any]: super().__init__(**snake_case ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def lowerCAmelCase_ ( self: str , snake_case: str=None , snake_case: Any=None , snake_case: Union[str, Any]="" , **snake_case: List[str] ) -> Union[str, Any]: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" snake_case_ :List[Any] = tokenizer_kwargs snake_case_ :int = {} if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None: snake_case_ :List[str] = self.model.config.return_all_scores if isinstance(snake_case , snake_case ) or top_k is None: snake_case_ :Optional[int] = top_k snake_case_ :Optional[Any] = False elif return_all_scores is not None: warnings.warn( """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of""" """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , snake_case , ) if return_all_scores: snake_case_ :str = None else: snake_case_ :Tuple = 1 if isinstance(snake_case , snake_case ): snake_case_ :List[Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: snake_case_ :Tuple = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self: int , *snake_case: List[str] , **snake_case: List[Any] ) -> Union[str, Any]: snake_case_ :str = super().__call__(*snake_case , **snake_case ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
snake_case_ :Any = """top_k""" not in kwargs if isinstance(args[0] , snake_case ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def lowerCAmelCase_ ( self: Tuple , snake_case: str , **snake_case: int ) -> Dict[str, GenericTensor]: snake_case_ :int = self.framework if isinstance(snake_case , snake_case ): return self.tokenizer(**snake_case , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ) and len(snake_case ) == 1 and isinstance(inputs[0] , snake_case ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case , **snake_case ) elif isinstance(snake_case , snake_case ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a""" """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" ) return self.tokenizer(snake_case , return_tensors=snake_case , **snake_case ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[str] ) -> List[str]: return self.model(**snake_case ) def lowerCAmelCase_ ( self: Tuple , snake_case: List[str] , snake_case: Optional[int]=None , snake_case: List[Any]=1 , snake_case: str=True ) -> int: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. 
# Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: snake_case_ :Optional[Any] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: snake_case_ :Union[str, Any] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None: snake_case_ :int = self.model.config.function_to_apply else: snake_case_ :Tuple = ClassificationFunction.NONE snake_case_ :str = model_outputs["""logits"""][0] snake_case_ :List[str] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: snake_case_ :List[str] = sigmoid(snake_case ) elif function_to_apply == ClassificationFunction.SOFTMAX: snake_case_ :Any = softmax(snake_case ) elif function_to_apply == ClassificationFunction.NONE: snake_case_ :Tuple = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} snake_case_ :Union[str, Any] = [ {"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(snake_case ) ] if not _legacy: dict_scores.sort(key=lambda snake_case : x["score"] , reverse=snake_case ) if top_k is not None: snake_case_ :int = dict_scores[:top_k] return dict_scores
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def A_ ( ): '''simple docstring''' snake_case_ :int = 9 snake_case_ :Tuple = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] snake_case_ :Union[str, Any] = kruskal(_lowercase, _lowercase ) snake_case_ :Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_lowercase ) == sorted(_lowercase )
66
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self: List[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :Union[str, Any] = controlnet_params snake_case_ :Union[str, Any] = """bird""" snake_case_ :List[Any] = jax.device_count() snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples ) snake_case_ :Any = jax.random.PRNGKey(0 ) snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() ) snake_case_ :List[Any] = replicate(snake_case ) snake_case_ :List[str] = shard(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :Dict = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1] snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Dict = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self: int ) -> Dict: snake_case_, snake_case_ :List[Any] = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_, snake_case_ :int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa ) snake_case_ :str = controlnet_params snake_case_ :Optional[int] = """Chef in the kitchen""" snake_case_ :Union[str, Any] = jax.device_count() snake_case_ :Any = pipe.prepare_text_inputs([prompts] * num_samples ) snake_case_ :str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) snake_case_ :Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) snake_case_ :str = jax.random.PRNGKey(0 ) snake_case_ :str = jax.random.split(snake_case , jax.device_count() ) snake_case_ :Tuple = replicate(snake_case ) snake_case_ :str = shard(snake_case ) snake_case_ :int = shard(snake_case ) snake_case_ :List[str] = pipe( prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ :int = images[0, 253:256, 253:256, -1] snake_case_ :Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ :Optional[int] = jnp.array( [[0.2_7_1_4_8_4, 
0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
66
1
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = 0 if start < end: snake_case_ :Dict = randint(_lowercase, _lowercase ) snake_case_ :List[Any] = a[end] snake_case_ :str = a[pivot] snake_case_ :List[Any] = temp snake_case_, snake_case_ :Any = _in_place_partition(_lowercase, _lowercase, _lowercase ) count += _in_place_quick_sort(_lowercase, _lowercase, p - 1 ) count += _in_place_quick_sort(_lowercase, p + 1, _lowercase ) return count def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :Dict = 0 snake_case_ :int = randint(_lowercase, _lowercase ) snake_case_ :Optional[int] = a[end] snake_case_ :List[str] = a[pivot] snake_case_ :Any = temp snake_case_ :str = start - 1 for index in range(_lowercase, _lowercase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value snake_case_ :Optional[int] = new_pivot_index + 1 snake_case_ :Tuple = a[new_pivot_index] snake_case_ :Tuple = a[index] snake_case_ :Tuple = temp snake_case_ :int = a[new_pivot_index + 1] snake_case_ :Union[str, Any] = a[end] snake_case_ :str = temp return new_pivot_index + 1, count __a = TemporaryFile() __a = 1_00 # 1000 elements are to be sorted __a , __a = 0, 1 # mean and standard deviation __a = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array __a = np.load(outfile) __a = len(M) - 1 __a = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
66
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""BridgeTower model configuration: vision tower, text tower, and the combined model config."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
# Backward-compatible alias for the name this module previously exported.
__a = BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP


class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the BridgeTower vision (ViT-style) tower."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,  # dimensionality of the encoder layers
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,  # if True, gradients do not flow through this tower
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a full BridgeTower checkpoint if needed."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # When loading from a combined BridgeTower config, pick out the *vision* sub-config.
        # (The previous code mistakenly read the "text_config" entry here.)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the BridgeTower text (RoBERTa-style) tower."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a full BridgeTower checkpoint if needed."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    """Configuration for the combined BridgeTower model (text + vision + cross-modal layers)."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,  # dict of BridgeTowerTextConfig kwargs, or None for defaults
        vision_config=None,  # dict of BridgeTowerVisionConfig kwargs, or None for defaults
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        kwargs.pop("text_config_dict", None)
        kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: "BridgeTowerTextConfig", vision_config: "BridgeTowerVisionConfig", **kwargs
    ):
        """Build a combined config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        """Serialize this config (including nested sub-configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias: the obfuscated module's last class binding was `lowerCamelCase`.
lowerCamelCase = BridgeTowerConfig
66
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" ) snake_case_ :Any = json.loads(open(_lowercase ).read() ) if not params: raise ValueError( f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" ) if not args.output.endswith(""".pt""" ): snake_case_ :Optional[int] = args.output + """.pt""" snake_case_ :List[str] = OrderedDict() with tf.device("""/CPU:0""" ): snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir ) snake_case_ :str = reader.get_variable_to_shape_map() for key_name in shapes.keys(): snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): snake_case_ :Any = int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): snake_case_ :Optional[int] = 8 snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :List[str] = torch.tensor(_lowercase ) elif key_name.startswith("""model/moe""" ): snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/softmlp/kernel""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player snake_case_ :Optional[int] = 
vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): snake_case_ :Dict = key_name[-9:-7] for i in range(16 ): snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) snake_case_ :Tuple = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/mlp""" ): snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p1/bias""" ): snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player snake_case_ :str = vnp.copy() # same because it is one dimensional snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/kernel""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/p2/bias""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player snake_case_ :Any = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif key_name.startswith("""model/ln""" ): snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :int = 
torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.startswith("""model/att""" ): snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum snake_case_ :Dict = state[:, 0, :, :] snake_case_ :int = state[:, 1, :, :] snake_case_ :List[str] = state[:, 2, :, :] snake_case_ :str = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Optional[int] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player snake_case_ :int = torch.tensor(_lowercase ) snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player snake_case_ :Dict = torch.tensor(_lowercase ) snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player snake_case_ :Optional[Any] = torch.tensor(_lowercase ) elif key_name.endswith("""/o/kernel""" ): snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player snake_case_ :str = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ :Any = torch.tensor(_lowercase ) elif key_name.startswith("""model/an""" ): snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): 
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional snake_case_ :Tuple = torch.tensor(_lowercase ) elif key_name.endswith("""/g""" ): snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player snake_case_ :Dict = vnp.copy() # same because it is one dimensional snake_case_ :Optional[int] = torch.tensor(_lowercase ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) if key_name.startswith("""model/wte""" ): snake_case_ :Tuple = """lm_head.weight""" snake_case_ :List[str] = vnp.copy() # same in embedded snake_case_ :List[Any] = torch.tensor(_lowercase ) elif key_name.startswith("""model/wob""" ): snake_case_ :str = """final_logits_bias""" snake_case_ :Any = vnp.copy() # same in embedded snake_case_ :List[Any] = state.reshape((1, -1) ) snake_case_ :Union[str, Any] = torch.tensor(_lowercase ) elif key_name == "model/dense/kernel": snake_case_ :str = """model.last_project.weight""" snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ :int = torch.tensor(_lowercase ) elif key_name == "model/dense_1/bias": snake_case_ :Optional[int] = """model.last_project.bias""" snake_case_ :Tuple = vnp.copy() # same because it is one dimensional snake_case_ :Any = torch.tensor(_lowercase ) torch.save(_lowercase, args.output ) if __name__ == "__main__": __a = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, 
required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") __a = parser.parse_args() convert_tf_gptsan_to_pt(args)
66
1
"""Smoke test module: checks that `require_onnxruntime` can decorate a test class.

The class body is intentionally empty; importing this module exercises the
decorator (which skips the class when onnxruntime is not installed).
"""
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class lowerCamelCase:
    """Placeholder test case — only the decorator's import/skip path is exercised."""

    pass
66
"""Train a small LSTM forecaster on a univariate series from ``sample_data.csv``.

The series is min-max scaled, windowed into (look_back -> forward_days)
input/target pairs, split chronologically into train/test, and fed to a
two-layer LSTM.

Fix: the original obfuscation rebound every variable to ``__a`` while the
use-sites still referenced ``df``, ``actual_data``, ``look_back`` etc., so the
script raised NameError on the second statement.  The names below are restored
from those use-sites.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # window length fed to the LSTM
    forward_days = 5  # horizon predicted per window
    periods = 20  # number of look_back windows reserved for testing
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # test split keeps a look_back overlap so the first test window is complete
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    # targets are flattened to shape (samples, forward_days)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
66
1
"""Evaluate a postfix (RPN) expression while printing a step-by-step trace table."""
import operator as op


def A_(post_fix):
    """Evaluate a postfix expression given as a list of token strings.

    Each operand push and operator application is printed in a tabular trace.
    Returns the final result as an int.

    Fixes vs. the obfuscated original: the popped operands were bound to
    throwaway names while the code referenced undefined ``a``/``b``; the
    ``div``/``opr`` bindings were lost; and ``stack.append`` pushed an
    undefined name instead of the current token.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731  integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is an operand, push it
            stack.append(x)
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            # pop right operand first (it was pushed last), then the left one
            b = stack.pop()
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # apply the operator and push the result back
            stack.append(str(opr[x](int(a), int(b))))
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


# Alias matching the upstream (pre-obfuscation) public name used by the CLI below.
solve = A_

if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
66
"""Lazy import structure for the AltCLIP model (standard transformers sub-package __init__).

Fixes vs. the obfuscated original: ``_import_structure`` was referenced but only a
throwaway name was ever bound, the torch-only modeling entries were never added to
the structure, and the ``_LazyModule`` instance was bound to a dead name instead of
replacing the module in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# Modeling classes require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
1
"""Integer exponentiation by squaring, with negative-exponent support.

Fixes vs. the obfuscated original: both functions used duplicate parameter names
(``_lowercase, _lowercase`` — a SyntaxError), referenced undefined ``a``/``b``,
were both named ``A_`` (shadowing the ``actual_power`` helper the body calls),
and the CLI called an undefined ``power``.  The double recursive call is also
hoisted so the recursion is O(log b) instead of O(b).
"""


def actual_power(a: int, b: int) -> int:
    """Return a**b via divide-and-conquer squaring (b is truncated toward 0 at each step)."""
    if b == 0:
        return 1
    # Compute the half-power once; the original recursed twice, doubling the work per level.
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return a**b; a negative exponent yields the reciprocal as a float."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


# Backward-compatible alias: the obfuscated module's last public binding was `A_`.
A_ = power

if __name__ == "__main__":
    print(power(-2, -3))
66
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def A_ ( _lowercase, _lowercase ): '''simple docstring''' snake_case_ :int = XCLIPTextConfig() # derive patch size from model name snake_case_ :Union[str, Any] = model_name.find("""patch""" ) snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase ) if "large" in model_name: snake_case_ :Optional[Any] = 768 snake_case_ :Union[str, Any] = 3072 snake_case_ :Any = 12 snake_case_ :Any = 1024 snake_case_ :str = 4096 snake_case_ :Union[str, Any] = 16 snake_case_ :Union[str, Any] = 24 snake_case_ :Tuple = 768 snake_case_ :Any = 3072 if model_name == "xclip-large-patch14-16-frames": snake_case_ :Any = 336 snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase ) if "large" in model_name: snake_case_ :List[Any] = 768 return config def A_ ( _lowercase ): '''simple docstring''' if name == "token_embedding.weight": snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: snake_case_ :str = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: snake_case_ :int = name.replace("""c_proj""", """fc2""" ) if name.startswith("""transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", 
"""text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" ) if "ln_final" in name: snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" ) if "visual.conv1" in name: snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" ) if "visual.proj" in name: snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" ) if "text_projection" in name: snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" ) if "prompts_visual_ln" in name: snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": snake_case_ :str = name.replace("""positional""", """position""" ) if name.startswith("""mit.resblocks""" ): snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): 
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" ) return name def A_ ( _lowercase, _lowercase ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ :Dict = orig_state_dict.pop(_lowercase ) if "attn.in_proj" in key: snake_case_ :Optional[Any] = key.split(""".""" ) if key.startswith("""visual""" ): snake_case_ :Any = key_split[3] snake_case_ :Optional[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ :str = val[ :dim, : ] snake_case_ :Optional[int] = val[ dim : dim * 2, : ] snake_case_ :Union[str, Any] = val[ -dim:, : ] else: snake_case_ :Dict = val[ :dim ] snake_case_ :Optional[int] = val[ dim : dim * 2 ] snake_case_ :Optional[int] = val[ -dim: ] else: if "weight" in key: snake_case_ :Optional[Any] = val[ :dim, : ] snake_case_ :List[str] = val[ dim : dim * 2, : ] snake_case_ :Dict = val[ -dim:, : ] else: snake_case_ :Union[str, Any] = val[:dim] snake_case_ :Union[str, Any] = val[ dim : dim * 2 ] snake_case_ :Union[str, Any] = val[-dim:] elif key.startswith("""mit""" ): snake_case_ :Tuple = key_split[2] snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ :Optional[int] = val[:dim, :] snake_case_ :Optional[int] = val[dim : dim * 2, :] snake_case_ :str = val[-dim:, :] else: snake_case_ :str = val[:dim] snake_case_ :Any = val[dim : dim * 2] snake_case_ :int = val[-dim:] else: snake_case_ :Tuple = key_split[2] snake_case_ :Any = config.text_config.hidden_size if "weight" in key: snake_case_ :Dict = val[:dim, :] snake_case_ :Dict = val[ dim : dim * 2, : ] snake_case_ :List[str] = val[-dim:, :] else: snake_case_ :Any = val[:dim] snake_case_ :Tuple = val[ dim : dim * 2 ] snake_case_ :List[str] = val[-dim:] else: snake_case_ :Optional[int] = rename_key(_lowercase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ :Optional[Any] = val.T snake_case_ :Tuple = 
val return orig_state_dict def A_ ( _lowercase ): '''simple docstring''' if num_frames == 8: snake_case_ :str = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: snake_case_ :int = """eating_spaghetti.npy""" elif num_frames == 32: snake_case_ :List[str] = """eating_spaghetti_32_frames.npy""" snake_case_ :int = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", ) snake_case_ :Union[str, Any] = np.load(_lowercase ) return list(_lowercase ) def A_ ( _lowercase, _lowercase=None, _lowercase=False ): '''simple docstring''' snake_case_ :List[Any] = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": 
"""https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } snake_case_ :Optional[int] = model_to_url[model_name] snake_case_ :int = 8 if "16-frames" in model_name: snake_case_ :List[Any] = 16 elif "shot" in model_name: snake_case_ :Dict = 32 snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase ) snake_case_ :Optional[Any] = XCLIPModel(_lowercase ) model.eval() if "drive" in checkpoint_url: snake_case_ :List[str] = """pytorch_model.bin""" gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase ) snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""] else: snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""] snake_case_ :Union[str, Any] = 
convert_state_dict(_lowercase, _lowercase ) snake_case_ :str = XCLIPModel(_lowercase ) snake_case_, snake_case_ :Optional[int] = model.load_state_dict(_lowercase, strict=_lowercase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase ) snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase ) snake_case_ :Optional[int] = prepare_video(_lowercase ) snake_case_ :Optional[Any] = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase ) print("""Shape of pixel values:""", inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ :List[Any] = model(**_lowercase ) # Verify outputs snake_case_ :List[Any] = outputs.logits_per_video snake_case_ :Any = logits_per_video.softmax(dim=1 ) print("""Probs:""", _lowercase ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] ) elif model_name == "xclip-base-patch16": snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] ) elif model_name == "xclip-large-patch14": snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] ) # kinetics-600 elif model_name == 
"xclip-base-patch16-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] ) else: raise ValueError(f"""Model name {model_name} not supported""" ) assert torch.allclose(_lowercase, _lowercase, atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) 
model.push_to_hub(_lowercase, organization="""nielsr""" ) processor.push_to_hub(_lowercase, organization="""nielsr""" ) slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __a = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
66
1
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {"vocab_file": "spiece.model"} __a = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } __a = { "albert-base-v1": 5_12, "albert-large-v1": 5_12, "albert-xlarge-v1": 5_12, "albert-xxlarge-v1": 5_12, "albert-base-v2": 5_12, "albert-large-v2": 5_12, "albert-xlarge-v2": 5_12, "albert-xxlarge-v2": 5_12, } __a = "▁" class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : List[Any] = VOCAB_FILES_NAMES _A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self: List[Any] , snake_case: Union[str, Any] , snake_case: Tuple=True , snake_case: Tuple=True , snake_case: List[str]=False , snake_case: str="[CLS]" , snake_case: Union[str, Any]="[SEP]" , snake_case: Union[str, Any]="<unk>" , snake_case: int="[SEP]" , snake_case: Optional[Any]="<pad>" , snake_case: Dict="[CLS]" , snake_case: Any="[MASK]" , snake_case: Optional[Dict[str, Any]] = None , **snake_case: int , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. snake_case_ :int = ( AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case , normalized=snake_case ) if isinstance(snake_case , snake_case ) else mask_token ) snake_case_ :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) snake_case_ :Any = do_lower_case snake_case_ :Optional[int] = remove_space snake_case_ :Optional[Any] = keep_accents snake_case_ :List[str] = vocab_file snake_case_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case ) @property def lowerCAmelCase_ ( self: int ) -> str: return len(self.sp_model ) def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]: snake_case_ :Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self: str ) -> List[str]: snake_case_ :List[str] = self.__dict__.copy() snake_case_ :Optional[Any] = None return state def __setstate__( self: Optional[int] , snake_case: Union[str, Any] ) -> Dict: snake_case_ :List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ :str = {} snake_case_ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self: Tuple , snake_case: List[str] ) -> List[str]: if self.remove_space: snake_case_ :str = """ """.join(inputs.strip().split() ) else: snake_case_ :List[str] = inputs snake_case_ :Union[str, Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: 
snake_case_ :Optional[Any] = unicodedata.normalize("""NFKD""" , snake_case ) snake_case_ :Tuple = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] ) if self.do_lower_case: snake_case_ :Optional[Any] = outputs.lower() return outputs def lowerCAmelCase_ ( self: List[str] , snake_case: str ) -> List[str]: snake_case_ :Tuple = self.preprocess_text(snake_case ) snake_case_ :str = self.sp_model.encode(snake_case , out_type=snake_case ) snake_case_ :List[str] = [] for piece in pieces: if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): snake_case_ :Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: snake_case_ :List[Any] = cur_pieces[1:] else: snake_case_ :Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(snake_case ) else: new_pieces.append(snake_case ) return new_pieces def lowerCAmelCase_ ( self: Optional[int] , snake_case: Union[str, Any] ) -> Any: return self.sp_model.PieceToId(snake_case ) def lowerCAmelCase_ ( self: Tuple , snake_case: Dict ) -> str: return self.sp_model.IdToPiece(snake_case ) def lowerCAmelCase_ ( self: List[str] , snake_case: Dict ) -> Optional[int]: snake_case_ :str = [] snake_case_ :Tuple = """""" snake_case_ :Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case ) + token snake_case_ :Dict = True snake_case_ :Dict = [] else: current_sub_tokens.append(snake_case ) snake_case_ :Dict = False out_string += self.sp_model.decode(snake_case ) return out_string.strip() def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]: snake_case_ :int = [self.sep_token_id] snake_case_ :Union[str, Any] = 
[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is not None: return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1] return [1] + ([0] * len(snake_case )) + [1] def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]: snake_case_ :Tuple = [self.sep_token_id] snake_case_ :Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self: Dict , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ :Any = os.path.join( snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , """wb""" ) as fi: snake_case_ :Dict = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
66
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: List[Any] , snake_case: List[str] , snake_case: Optional[Any]=13 , snake_case: List[str]=7 , snake_case: Dict=True , snake_case: List[str]=True , snake_case: Optional[int]=True , snake_case: Any=True , snake_case: Optional[Any]=99 , snake_case: Tuple=32 , snake_case: Tuple=5 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: Union[str, Any]="gelu" , snake_case: Tuple=0.1 , snake_case: List[Any]=0.1 , snake_case: List[str]=512 , snake_case: Optional[int]=16 , snake_case: int=2 , snake_case: List[Any]=0.0_2 , snake_case: Union[str, Any]=4 , ) -> List[str]: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :Any = seq_length snake_case_ :List[str] = is_training snake_case_ :Optional[Any] = use_attention_mask snake_case_ :Dict = use_token_type_ids snake_case_ :Union[str, Any] = use_labels snake_case_ :str = vocab_size snake_case_ :int = hidden_size snake_case_ :List[str] = num_hidden_layers snake_case_ :Dict = num_attention_heads snake_case_ :Any = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Any = max_position_embeddings snake_case_ :Union[str, Any] = type_vocab_size snake_case_ :Optional[int] = type_sequence_label_size snake_case_ :Union[str, Any] = 
initializer_range snake_case_ :Tuple = num_choices def lowerCAmelCase_ ( self: Tuple ) -> str: snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ :Union[str, Any] = None if self.use_attention_mask: snake_case_ :str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ :Any = None if self.use_token_type_ids: snake_case_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ :int = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case_ :str = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Optional[int] = config_and_inputs snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case_ :int = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_, snake_case_ :Dict = config_and_inputs snake_case_ :Union[str, Any] = True snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple 
docstring''' _A : List[str] = True _A : Dict = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case_ :Any = FlaxBertModelTester(self ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Dict: # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. snake_case_ :Dict = FlaxBertModel.from_pretrained("""bert-base-cased""" ) snake_case_ :Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case )
66
1
"""M-CTC-T model configuration."""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    """Configuration class for the M-CTC-T speech-recognition model.

    Stores the hyperparameters of the transformer encoder, the convolutional
    feature-extraction front end, and the CTC head. Instantiating with the
    defaults yields a configuration matching ``speechbrain/m-ctc-t-large``.

    Raises:
        ValueError: at construction time when ``len(conv_kernel)`` does not
            equal ``num_conv_layers`` (the conv front end would be ill-formed).
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8_065,
        hidden_size=1_536,
        num_hidden_layers=36,
        intermediate_size=6_144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        # Special token ids are forwarded so PretrainedConfig records them too.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # Stored as lists (not tuples) so the config serializes cleanly to JSON;
        # prevents config testing fail with exporting to json.
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
66
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def lowerCAmelCase_ ( self: Tuple , snake_case: list[list[float]] , snake_case: list[int] ) -> int: snake_case_ :Any = 0.0 snake_case_ :Tuple = 0.0 for i in range(len(snake_case ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self: Optional[int] , snake_case: list[list[int | float]] , snake_case: list[int] , snake_case: int , snake_case: float ) -> list[list[int | float]]: for i in range(len(snake_case ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def A_ ( ): '''simple docstring''' snake_case_ :Dict = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) snake_case_ :List[Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training snake_case_ :Optional[Any] = SelfOrganizingMap() snake_case_ :Dict = 3 snake_case_ :Dict = 0.5 for _ in range(_lowercase ): for j in range(len(_lowercase ) ): # training sample snake_case_ :List[Any] = training_samples[j] # Compute the winning vector snake_case_ :Optional[int] = self_organizing_map.get_winner(_lowercase, _lowercase ) # Update the winning vector snake_case_ :List[str] = self_organizing_map.update(_lowercase, _lowercase, _lowercase, _lowercase ) # classify test sample snake_case_ :str = [0, 0, 0, 1] snake_case_ :List[Any] = self_organizing_map.get_winner(_lowercase, _lowercase ) # results print(f"""Clusters that the test sample belongs to : {winner}""" ) print(f"""Weights that have been trained : {weights}""" ) # running the main() function if __name__ == "__main__": main()
66
1
"""Hyperparameter-search backend registry.

Each backend wraps one third-party HPO library (optuna, ray[tune], sigopt,
wandb) behind a common interface so the Trainer can dispatch to whichever
library is installed.
"""
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    """Abstract base class: one subclass per supported HPO library."""

    name: str
    # pip package name when it differs from `name` (e.g. "'ray[tune]'").
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backing library is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        """Run the hyperparameter search and return the best run."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default search space for this backend."""
        raise NotImplementedError

    def ensure_available(self):
        # Fail fast with an actionable install hint instead of an ImportError later.
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


# Registry keyed by the HPSearchBackend enum so callers can look a backend up
# by name.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    """Return the name of the first installed backend.

    Logs an informational message when several are installed; raises
    ``RuntimeError`` with install instructions when none is.
    """
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
66
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple: snake_case_ :Dict = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = image_size snake_case_ :List[Any] = patch_size snake_case_ :int = num_channels snake_case_ :Tuple = embed_dim snake_case_ :str = depths snake_case_ :str = num_heads snake_case_ :Optional[int] = window_size snake_case_ :Tuple = mlp_ratio snake_case_ :Any = qkv_bias snake_case_ 
:List[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :Union[str, Any] = drop_path_rate snake_case_ :Any = hidden_act snake_case_ :Optional[Any] = use_absolute_embeddings snake_case_ :Union[str, Any] = patch_norm snake_case_ :Dict = layer_norm_eps snake_case_ :str = initializer_range snake_case_ :Tuple = is_training snake_case_ :Tuple = scope snake_case_ :Union[str, Any] = use_labels snake_case_ :Optional[Any] = type_sequence_label_size snake_case_ :Dict = encoder_stride def lowerCAmelCase_ ( self: int ) -> int: snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :Any = None if self.use_labels: snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :int = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]: snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Optional[int] = model(snake_case ) snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ :int = int(config.embed_dim * 2 
** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any: snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ :Tuple = model(snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ :List[Any] = 1 snake_case_ :int = SwinvaForMaskedImageModeling(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :int = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple: snake_case_ :int = self.type_sequence_label_size snake_case_ :List[Any] = SwinvaForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ :Dict = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: int ) -> str: snake_case_ :Any = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Any = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : 
List[str] = False _A : Tuple = False _A : List[str] = False def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case_ :Optional[int] = SwinvaModelTester(self ) snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: int ) -> Dict: pass def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ :List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :Optional[int] = model_class(snake_case ) snake_case_ :List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :int = [*signature.parameters.keys()] snake_case_ 
:List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :List[str] = True for model_class in self.all_model_classes: snake_case_ :List[Any] = True snake_case_ :Any = False snake_case_ :Optional[int] = True snake_case_ :Tuple = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.attentions snake_case_ :Dict = len(self.model_tester.depths ) self.assertEqual(len(snake_case ) , snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ :Union[str, Any] = True snake_case_ :Tuple = config.window_size**2 snake_case_ :Any = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :int = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ :Any = len(snake_case ) # Check attention is always last and order is fine snake_case_ :int = True snake_case_ :Dict = True snake_case_ :Optional[int] = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ :Any = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ :int = 2 self.assertEqual(out_len + added_hidden_states , len(snake_case ) ) snake_case_ :str = outputs.attentions self.assertEqual(len(snake_case ) , snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]: snake_case_ :Dict = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ :str = outputs.hidden_states snake_case_ :List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(snake_case ) , snake_case ) # Swinv2 has a different seq_length snake_case_ :List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ :str = outputs.reshaped_hidden_states self.assertEqual(len(snake_case ) , snake_case ) snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape snake_case_ :int = ( reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ :Union[str, Any] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :List[str] = True self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = 3 snake_case_ :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ :str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ :str = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ :Tuple = True self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: List[Any] ) -> Dict: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case_, snake_case_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ :Optional[int] = _config_zero_init(snake_case ) for model_class in self.all_model_classes: snake_case_ :Tuple = model_class(config=snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( snake_case ) snake_case_ :str = self.default_image_processor snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
66
1