code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCamelCase = { "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["VisionEncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["TFVisionEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ConsistencyModelPipeline _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _UpperCamelCase = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[str] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def UpperCamelCase__ ( self , A_=False ) ->Dict: '''simple docstring''' if class_cond: __lowerCAmelCase : List[str] = self.dummy_cond_unet else: __lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Dict = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase__ ( self , A_ , A_=0 ) ->Tuple: '''simple docstring''' 
if str(A_ ).startswith('''mps''' ): __lowerCAmelCase : str = torch.manual_seed(A_ ) else: __lowerCAmelCase : Dict = torch.Generator(device=A_ ).manual_seed(A_ ) __lowerCAmelCase : Tuple = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Tuple = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : List[str] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_dummy_inputs(A_ ) __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] __lowerCAmelCase : str = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : str = self.get_dummy_components(class_cond=A_ ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : List[Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(A_ ) __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] __lowerCAmelCase : List[str] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Any = 
'''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Union[str, Any] = self.get_dummy_components() __lowerCAmelCase : List[Any] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : int = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Tuple = self.get_dummy_inputs(A_ ) __lowerCAmelCase : Any = 1 __lowerCAmelCase : List[Any] = None __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Any = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Optional[Any] = self.get_dummy_components(class_cond=A_ ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : Union[str, Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_dummy_inputs(A_ ) __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Dict = None __lowerCAmelCase : Tuple = 0 __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] __lowerCAmelCase : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = torch.manual_seed(A_ ) __lowerCAmelCase : Tuple = { 
'''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase : List[str] = self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ ) __lowerCAmelCase : Union[str, Any] = latents return inputs def UpperCamelCase__ ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->Optional[int]: '''simple docstring''' if type(A_ ) == str: __lowerCAmelCase : int = torch.device(A_ ) __lowerCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ ) __lowerCAmelCase : Union[str, Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ ) return latents def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : str = self.get_inputs() __lowerCAmelCase : Any = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Dict = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ ) 
pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : List[Any] = self.get_inputs() __lowerCAmelCase : Tuple = 1 __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : str = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=A_ , device=A_ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ): __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Dict = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) 
pipe.to(torch_device=A_ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Union[str, Any] = self.get_inputs(get_fixed_latents=A_ , device=A_ ) __lowerCAmelCase : Any = 1 __lowerCAmelCase : int = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ): __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : str = image[0, -3:, -3:, -1] __lowerCAmelCase : Any = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
275
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """camembert""" def __init__( self , A_=3_0522 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) ->List[Any]: '''simple docstring''' super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) __lowerCAmelCase : Optional[int] = vocab_size __lowerCAmelCase : Optional[int] = hidden_size __lowerCAmelCase : str = num_hidden_layers __lowerCAmelCase : List[Any] = num_attention_heads __lowerCAmelCase : int = hidden_act __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : List[Any] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : Union[str, Any] = max_position_embeddings __lowerCAmelCase : Optional[Any] = type_vocab_size __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : Optional[int] = layer_norm_eps __lowerCAmelCase : Dict = position_embedding_type __lowerCAmelCase : Union[str, Any] = use_cache __lowerCAmelCase : Tuple = classifier_dropout class __lowercase (_UpperCAmelCase ): @property def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __lowerCAmelCase : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase : Union[str, Any] = {0: 
'''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
275
from collections import deque from .hash_table import HashTable class __lowercase (_UpperCAmelCase ): def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' super().__init__(*A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Dict = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(A_ ) __lowerCAmelCase : int = self.values[key] def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' return ( sum(self.charge_factor - len(A_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def UpperCamelCase__ ( self , A_ , A_=None ) ->str: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(A_ ) == 0 ): return key return super()._collision_resolution(A_ , A_ )
275
1
from __future__ import annotations from typing import Any def _lowercase ( lowercase__ ): if not postfix_notation: return 0 __lowerCAmelCase : Tuple = {'''+''', '''-''', '''*''', '''/'''} __lowerCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: __lowerCAmelCase, __lowerCAmelCase : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(lowercase__ ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
275
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Optional[Any] = global_rng __lowerCAmelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = parent __lowerCAmelCase : Optional[int] = batch_size __lowerCAmelCase : Any = min_seq_length __lowerCAmelCase : Tuple = max_seq_length __lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Dict = feature_size __lowerCAmelCase : Optional[int] = padding_value __lowerCAmelCase : Tuple = sampling_rate __lowerCAmelCase : Union[str, Any] = return_attention_mask __lowerCAmelCase : Dict = do_normalize def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Union[str, Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __lowerCAmelCase : Tuple = [ _flatten(floats_list((x, self.feature_size) ) ) for x in 
range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Tuple = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WavaVecaFeatureExtractor def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self ) def UpperCamelCase__ ( self , A_ ) ->Optional[Any]: '''simple docstring''' self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input __lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : List[Any] = np.asarray(A_ ) __lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : str = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 ) __lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths] __lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : List[str] = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ ) __lowerCAmelCase : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) 
self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : List[str] = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' ) __lowerCAmelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : int = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) __lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Optional[int] = feat_extract( A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to 
longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' import torch __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa ) __lowerCAmelCase : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def UpperCamelCase__ ( self ) ->int: '''simple docstring''' for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: __lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ ) __lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
275
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json", "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json", "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json", "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """mobilenet_v2""" def __init__( self , A_=3 , A_=224 , A_=1.0 , A_=8 , A_=8 , A_=6 , A_=32 , A_=True , A_=True , A_="relu6" , A_=True , A_=0.8 , A_=0.02 , A_=0.001 , A_=255 , **A_ , ) ->Tuple: '''simple docstring''' super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) __lowerCAmelCase : List[str] = num_channels __lowerCAmelCase : Optional[Any] = image_size __lowerCAmelCase : List[Any] = depth_multiplier __lowerCAmelCase : int = depth_divisible_by __lowerCAmelCase : Union[str, Any] = min_depth __lowerCAmelCase : List[str] = expand_ratio __lowerCAmelCase : Union[str, Any] = output_stride __lowerCAmelCase : str = first_layer_is_expansion __lowerCAmelCase : Union[str, Any] = finegrained_output __lowerCAmelCase : Dict = hidden_act __lowerCAmelCase : Dict = tf_padding __lowerCAmelCase : List[Any] = classifier_dropout_prob __lowerCAmelCase : Optional[Any] = initializer_range __lowerCAmelCase : Any = layer_norm_eps __lowerCAmelCase : Union[str, Any] = semantic_loss_ignore_index class __lowercase (_UpperCAmelCase ): _UpperCamelCase = version.parse("""1.11""" ) @property 
def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def UpperCamelCase__ ( self ) ->float: '''simple docstring''' return 1e-4
275
# Unit tests for the DeBERTa model family (modeling tester, common-test suite,
# and a slow integration test).
# NOTE(review): identifier mangling collapsed the three class names to
# `__lowercase`, all helper-method names to `UpperCamelCase__`, all parameters
# to `A_`, and all locals to `__lowerCAmelCase`; several bodies consequently
# reference names that are never bound (e.g. `parent`, `config_and_inputs`,
# `DebertaModelTester`).  Duplicate `A_` parameters are a SyntaxError as
# written.
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class __lowercase (_UpperCAmelCase ):
    # Builds small DebertaConfig objects and random inputs for the tests below.
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=False , A_=True , A_="None" , A_=3 , A_=4 , A_=None , ) ->List[Any]:
        '''Store the (mangled) hyper-parameters used to build test configs/inputs.'''
        __lowerCAmelCase : Union[str, Any] = parent
        __lowerCAmelCase : List[str] = batch_size
        __lowerCAmelCase : Dict = seq_length
        __lowerCAmelCase : List[Any] = is_training
        __lowerCAmelCase : List[Any] = use_input_mask
        __lowerCAmelCase : Optional[int] = use_token_type_ids
        __lowerCAmelCase : Tuple = use_labels
        __lowerCAmelCase : str = vocab_size
        __lowerCAmelCase : int = hidden_size
        __lowerCAmelCase : Any = num_hidden_layers
        __lowerCAmelCase : Any = num_attention_heads
        __lowerCAmelCase : Dict = intermediate_size
        __lowerCAmelCase : int = hidden_act
        __lowerCAmelCase : int = hidden_dropout_prob
        __lowerCAmelCase : Any = attention_probs_dropout_prob
        __lowerCAmelCase : List[str] = max_position_embeddings
        __lowerCAmelCase : Union[str, Any] = type_vocab_size
        __lowerCAmelCase : Union[str, Any] = type_sequence_label_size
        __lowerCAmelCase : Optional[int] = initializer_range
        __lowerCAmelCase : int = num_labels
        __lowerCAmelCase : int = num_choices
        __lowerCAmelCase : List[str] = relative_attention
        __lowerCAmelCase : Union[str, Any] = position_biased_input
        __lowerCAmelCase : int = pos_att_type
        __lowerCAmelCase : List[Any] = scope

    def UpperCamelCase__ ( self ) ->Dict:
        '''Create a config plus random input ids, masks and labels.'''
        __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowerCAmelCase : int = None
        if self.use_input_mask:
            # Random 0/1 attention mask.
            __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        __lowerCAmelCase : List[str] = None
        if self.use_token_type_ids:
            __lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : int = None
        __lowerCAmelCase : List[str] = None
        if self.use_labels:
            __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )

        __lowerCAmelCase : Tuple = self.get_config()

        # NOTE(review): the returned names were locals in the un-mangled
        # original; here they are unbound.
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Build a small DebertaConfig from the stored hyper-parameters.'''
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def UpperCamelCase__ ( self ) ->str:
        '''Pipeline-test variant of the config (vocab capped at 300 originally).'''
        __lowerCAmelCase : str = self.get_config()
        __lowerCAmelCase : Dict = 300
        # NOTE(review): returns `config`, which is unbound here — the result of
        # get_config() was assigned to a mangled local above.
        return config

    def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]:
        '''Assert the loss is a scalar (size []).'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
        '''Run the bare DebertaModel with/without masks and check the output shape.'''
        __lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0]
        __lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0]
        __lowerCAmelCase : List[str] = model(A_ )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
        '''Check DebertaForMaskedLM logits have shape (batch, seq, vocab).'''
        __lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
        '''Check DebertaForSequenceClassification logits shape and scalar loss.'''
        __lowerCAmelCase : Any = self.num_labels
        __lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict:
        '''Check DebertaForTokenClassification logits shape (batch, seq, labels).'''
        __lowerCAmelCase : List[Any] = self.num_labels
        __lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str:
        '''Check DebertaForQuestionAnswering start/end logits shapes.'''
        __lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : int = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Repackage prepare_config_and_inputs() as (config, inputs_dict).'''
        __lowerCAmelCase : Any = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ),
        ) : Tuple = config_and_inputs
        __lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    # Model classes exercised by the shared ModelTesterMixin machinery.
    _UpperCamelCase = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class map for the pipeline test mixin.
    _UpperCamelCase = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): five distinct mixin flags all mangled to one name.
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase__ ( self ) ->int:
        '''setUp: build the model tester and the config tester.'''
        # NOTE(review): `DebertaModelTester` is the un-mangled name of the
        # tester class above; it is undefined in this mangled module.
        __lowerCAmelCase : int = DebertaModelTester(self )
        __lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Exercise the bare model check.'''
        __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*A_ )

    def UpperCamelCase__ ( self ) ->int:
        '''Exercise the sequence-classification check.'''
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ )

    def UpperCamelCase__ ( self ) ->int:
        '''Exercise the masked-LM check.'''
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Exercise the question-answering check.'''
        __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*A_ )

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Exercise the token-classification check.'''
        __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*A_ )

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Smoke-test loading the first published checkpoint.'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )


@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
    @unittest.skip(reason='''Model not available yet''' )
    def UpperCamelCase__ ( self ) ->Dict:
        '''Placeholder for a head-level integration test.'''
        pass

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Compare a slice of microsoft/deberta-base outputs to golden values.'''
        __lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' )

        __lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        __lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0]
        # compare the actual values for a slice.
        __lowerCAmelCase : Optional[Any] = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
275
1
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _lowercase ( lowercase__ , lowercase__ , lowercase__ = 1_0**-1_0 ): __lowerCAmelCase : List[str] = a while True: __lowerCAmelCase : Any = Decimal(lowercase__ ) - ( Decimal(eval(lowercase__ ) ) / Decimal(eval(str(diff(lowercase__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(lowercase__ ) ) < precision: # noqa: S307 return float(lowercase__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") # Find Square Root of 5 print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") # Exponential Roots print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
275
# Builds per-residue atom14 <-> atom37 index maps and existence masks for a
# protein batch (OpenFold-style data transform).
# NOTE(review): identifier mangling renamed both functions to `_lowercase`
# (the second shadows the first), collapsed locals to `__lowerCAmelCase`, and
# left references to unbound names (`restype_atomaa_to_atomaa_list`,
# `protein`, `make_atomaa_masks`, `batch`, `out`); "atomaa" is the mangled
# form of atom14/atom37.
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def _lowercase ( lowercase__ ):
    # Per-residue-type lookup tables, built from the residue constants.
    __lowerCAmelCase : str = []
    __lowerCAmelCase : List[Any] = []
    __lowerCAmelCase : str = []

    for rt in rc.restypes:
        # Atom names of this residue type in atom14 order.
        __lowerCAmelCase : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        # atom14 slot -> global atom index (0 for absent atoms).
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        __lowerCAmelCase : List[str] = {name: i for i, name in enumerate(lowercase__ )}
        # global atom slot -> atom14 index (0 when the atom is absent).
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
        # 1.0 where the atom14 slot is occupied for this residue type.
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 1_4 )
    restype_atomaa_to_atomaa_list.append([0] * 3_7 )
    restype_atomaa_mask_list.append([0.0] * 1_4 )

    # Lift the Python lists to tensors on the same device as the input.
    __lowerCAmelCase : List[Any] = torch.tensor(
        lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
    __lowerCAmelCase : Optional[Any] = torch.tensor(
        lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
    __lowerCAmelCase : Tuple = torch.tensor(
        lowercase__ , dtype=torch.floataa , device=protein['''aatype'''].device , )
    __lowerCAmelCase : List[Any] = protein['''aatype'''].to(torch.long )

    # Create the mapping for (residx, atom14) --> atom37, i.e. an array with
    # shape (num_res, 14) containing the atom37 indices for this protein.
    __lowerCAmelCase : Any = restype_atomaa_to_atomaa[protein_aatype]
    __lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]

    __lowerCAmelCase : int = residx_atomaa_mask
    __lowerCAmelCase : List[str] = residx_atomaa_to_atomaa.long()

    # Create the gather indices for mapping back.
    __lowerCAmelCase : int = restype_atomaa_to_atomaa[protein_aatype]
    __lowerCAmelCase : Union[str, Any] = residx_atomaa_to_atomaa.long()

    # Create the corresponding mask: [21 residue types, 37 atom slots].
    __lowerCAmelCase : str = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['''aatype'''].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        __lowerCAmelCase : Optional[int] = rc.restype_atoa[restype_letter]
        __lowerCAmelCase : Optional[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            __lowerCAmelCase : str = rc.atom_order[atom_name]
            __lowerCAmelCase : List[Any] = 1

    __lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
    __lowerCAmelCase : Any = residx_atomaa_mask

    return protein


def _lowercase ( lowercase__ ):
    # numpy front-end: lift ndarrays to tensors, run the transform above
    # (`make_atomaa_masks` is its un-mangled name), convert back to numpy.
    __lowerCAmelCase : Dict = tree_map(lambda lowercase__ : torch.tensor(lowercase__ , device=batch['''aatype'''].device ) , lowercase__ , np.ndarray )
    __lowerCAmelCase : Tuple = tensor_tree_map(lambda lowercase__ : np.array(lowercase__ ) , make_atomaa_masks(lowercase__ ) )
    return out
275
1
def _lowercase ( lowercase__ , lowercase__ ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
275
def _lowercase ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowerCAmelCase : int = sorted(string.lower() ) return len(lowercase__ ) == len(set(lowercase__ ) ) if __name__ == "__main__": _UpperCamelCase = input("Enter a string ").strip() _UpperCamelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
1
# Dummy placeholder standing in for sentencepiece-backed classes when the
# `sentencepiece` package is not installed: instantiation raises a helpful
# missing-backend error via `requires_backends`.
#
# NOTE: the mangled original repeated this exact definition 31 times under the
# single name `__lowercase` (each rebinding shadowing the previous one) and
# declared every constructor as `def __init__(self, *A_, **A_)` — the same
# name for *args and **kwargs, which is a SyntaxError.  The 30 dead
# rebindings are dropped and the varargs renamed; the module's final state
# (one `__lowercase` class bound) is unchanged.
from ..utils import DummyObject, requires_backends


class __lowercase (metaclass=_UpperCAmelCase ):
    # Backend(s) whose absence this dummy reports.
    _UpperCamelCase = ["""sentencepiece"""]

    def __init__( self , *args , **kwargs ):
        '''Raise the missing-backend error for `sentencepiece` immediately.'''
        requires_backends(self , ['''sentencepiece'''] )
275
# Karras et al. (Elucidating the Design Space of Diffusion-Based Generative
# Models)-style variance-exploding scheduler, plus its output dataclass.
# NOTE(review): identifier mangling collapsed both class names to
# `__lowercase`, all method names to `UpperCamelCase__`, parameters to `A_`
# and locals to `__lowerCAmelCase`; bodies consequently reference unbound
# names (`sigma_max`, `num_inference_steps`, `sample_hat`, `KarrasVeOutput`,
# ...), and the duplicate `A_` parameters are a SyntaxError as written.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class __lowercase (_UpperCAmelCase ):
    # NOTE(review): three distinct output fields (previous sample, derivative,
    # predicted original sample in the un-mangled original) collapsed to one
    # mangled name.
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = None


class __lowercase (_UpperCAmelCase , _UpperCAmelCase ):
    # Scheduler "order" used by pipelines when stepping.
    _UpperCamelCase = 2

    # Defaults suggest the original parameter order: sigma_min=0.02,
    # sigma_max=100, s_noise=1.007, s_churn=80, s_min=0.05, s_max=50 —
    # TODO confirm against the un-mangled source.
    @register_to_config
    def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int:
        '''Store the noise-schedule parameters; values are filled by set_timesteps.'''
        __lowerCAmelCase : Optional[int] = sigma_max

        # setable values
        __lowerCAmelCase : int = None
        __lowerCAmelCase : np.IntTensor = None
        __lowerCAmelCase : torch.FloatTensor = None  # sigma(t_i)

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor:
        '''Identity scaling: this scheduler does not rescale model inputs.'''
        return sample

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
        '''Build the reversed timestep index array and the geometric sigma schedule.'''
        __lowerCAmelCase : str = num_inference_steps
        # Timestep indices, descending.
        __lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ )
        # Geometric interpolation between sigma_max and sigma_min.
        __lowerCAmelCase : Tuple = [
            (
                self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]:
        '''Churn step: raise sigma to sigma_hat and add matching gaussian noise.'''
        # gamma > 0 only while sigma lies inside the [s_min, s_max] churn band.
        if self.config.s_min <= sigma <= self.config.s_max:
            __lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __lowerCAmelCase : List[str] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device )
        __lowerCAmelCase : str = sigma + gamma * sigma
        __lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        '''First-order (Euler) step from sigma_hat to sigma_prev.'''
        __lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
        __lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
        __lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        '''Second-order correction: average the Euler derivative with the new one.'''
        __lowerCAmelCase : str = sample_prev + sigma_prev * model_output
        __lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
        __lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any:
        '''add_noise is deliberately unsupported for this scheduler.'''
        raise NotImplementedError()
275
1
# GPTSAN-japanese model configuration.
# NOTE(review): identifier mangling — the config class is `__lowercase`,
# `_UpperCamelCase` is reused for several distinct module/class-level names,
# and every `__init__` parameter is named `A_` (a duplicate-argument
# SyntaxError) while the body reads the un-mangled names.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

# NOTE(review): rebinds `_UpperCamelCase`, clobbering the logger above.
_UpperCamelCase = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class __lowercase (_UpperCAmelCase ):
    # Model-type tag for the auto-config machinery.
    _UpperCamelCase = """gptsan-japanese"""
    # Keys ignored at inference time.
    _UpperCamelCase = [
        """past_key_values""",
    ]
    # Canonical attribute-name aliases.
    _UpperCamelCase = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self , A_=3_6000 , A_=1280 , A_=1024 , A_=8192 , A_=4096 , A_=128 , A_=10 , A_=0 , A_=16 , A_=16 , A_=128 , A_=0.0 , A_=1e-5 , A_=False , A_=0.0 , A_="float32" , A_=False , A_=False , A_=False , A_=0.002 , A_=False , A_=True , A_=3_5998 , A_=3_5995 , A_=3_5999 , **A_ , ) ->Optional[Any]:
        '''Configuration container for GPTSAN-japanese (switch-transformer style) models.'''
        # NOTE(review): assignments target throwaway locals instead of `self`
        # attributes — presumably `self.<name> = <name>` originally.
        __lowerCAmelCase : Optional[Any] = vocab_size
        __lowerCAmelCase : List[str] = max_position_embeddings
        __lowerCAmelCase : Any = d_model
        __lowerCAmelCase : List[Any] = d_ff
        __lowerCAmelCase : List[str] = d_ext
        __lowerCAmelCase : Optional[int] = d_spout
        __lowerCAmelCase : int = num_switch_layers
        __lowerCAmelCase : str = num_ext_layers
        # Total layer count = switch layers + extra layers.
        __lowerCAmelCase : Optional[Any] = num_switch_layers + num_ext_layers
        __lowerCAmelCase : Union[str, Any] = num_heads
        __lowerCAmelCase : List[Any] = num_experts
        __lowerCAmelCase : int = expert_capacity
        __lowerCAmelCase : List[Any] = dropout_rate
        __lowerCAmelCase : Dict = layer_norm_epsilon
        __lowerCAmelCase : int = router_bias
        __lowerCAmelCase : str = router_jitter_noise
        __lowerCAmelCase : Union[str, Any] = router_dtype
        __lowerCAmelCase : Any = router_ignore_padding_tokens
        __lowerCAmelCase : Dict = output_hidden_states
        __lowerCAmelCase : Union[str, Any] = output_attentions
        __lowerCAmelCase : Union[str, Any] = initializer_factor
        __lowerCAmelCase : int = output_router_logits
        __lowerCAmelCase : Optional[int] = use_cache
        super().__init__(
            separator_token_id=A_ , pad_token_id=A_ , eos_token_id=A_ , **A_ , )
275
# Data collator (LUKE-style entity-aware NER) plus two helpers: a generic
# 2-D/3-D padding routine and a punctuation predicate.
# NOTE(review): identifier mangling — both helpers are named `_lowercase`
# (the second shadows the first), all parameters are `lowercase__`/`A_`, all
# locals are `__lowerCAmelCase`, and the dataclass fields collapsed to
# repeated `_UpperCamelCase` bindings; several bodies read names that are
# never bound (`sequence_length`, `padding_side`, `out_tensor`, `cp`, `cat`,
# `features`, `labels`, `padding_tensor`).
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    # Pre-fill an output array with the padding value; a tuple padding value
    # selects the 3-D (spans) variant, otherwise the 2-D variant.
    if isinstance(lowercase__ , lowercase__ ):
        __lowerCAmelCase : Dict = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ )
    else:
        __lowerCAmelCase : Optional[int] = np.full((len(lowercase__ ), sequence_length) , lowercase__ )

    # Copy each (truncated) row into the padded buffer, left- or
    # right-aligned depending on the tokenizer's padding side.
    for i, tensor in enumerate(lowercase__ ):
        if padding_side == "right":
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : int = tensor[:sequence_length]
        else:
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : Optional[Any] = tensor[:sequence_length]

    return out_tensor.tolist()


def _lowercase ( lowercase__ ):
    # True for ASCII punctuation ranges or any Unicode category starting 'P'.
    __lowerCAmelCase : Union[str, Any] = ord(lowercase__ )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    __lowerCAmelCase : int = unicodedata.category(lowercase__ )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class __lowercase (_UpperCAmelCase ):
    # NOTE(review): six distinct dataclass fields (tokenizer, padding,
    # max_length, pad_to_multiple_of, label_pad_token_id, return_tensors in
    # the un-mangled original, judging by the body) collapsed to one name.
    _UpperCamelCase = 42
    _UpperCamelCase = True
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = -100
    _UpperCamelCase = "pt"

    def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
        '''Pad a batch of features, including labels, NER tags and entity spans.'''
        import torch

        # Accept either the 'label' or 'labels' key.
        __lowerCAmelCase : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
        __lowerCAmelCase : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Tokenizer padding; tensors are only requested when no labels need
        # custom padding afterwards.
        __lowerCAmelCase : List[Any] = self.tokenizer.pad(
            A_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )

        if labels is None:
            return batch

        # Pad labels to the (padded) entity-sequence length, on the
        # tokenizer's padding side.
        __lowerCAmelCase : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1]
        __lowerCAmelCase : Optional[int] = self.tokenizer.padding_side
        if padding_side == "right":
            __lowerCAmelCase : Any = [
                list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels
            ]
        else:
            __lowerCAmelCase : Optional[int] = [
                [self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels
            ]

        # Pad NER tags (2-D, fill -1) and entity spans (3-D, fill (-1, -1))
        # via the padding helper above.
        __lowerCAmelCase : Tuple = [feature['''ner_tags'''] for feature in features]
        __lowerCAmelCase : List[Any] = padding_tensor(A_ , -1 , A_ , A_ )
        __lowerCAmelCase : Optional[int] = [feature['''original_entity_spans'''] for feature in features]
        __lowerCAmelCase : Any = padding_tensor(A_ , (-1, -1) , A_ , A_ )
        # Final conversion of every field to int64 tensors.
        __lowerCAmelCase : Optional[Any] = {k: torch.tensor(A_ , dtype=torch.intaa ) for k, v in batch.items()}

        return batch
275
1
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated image-processor module.  All method names
# were collapsed to `UpperCamelCase__`, all parameters to `A_`, and local
# assignments bind `__lowerCAmelCase` while later statements read the real
# names (`size`, `crop_size`, `images`, ...).  The docstrings describe the
# apparent intent (a shortest-edge-resize / center-crop ImageNet-style
# processor) — confirm against the original source.
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

_UpperCamelCase = logging.get_logger(__name__)


class __lowercase(_UpperCAmelCase):
    # Names of the model inputs this processor produces.
    _UpperCamelCase = ["""pixel_values"""]

    def __init__(self, A_=True, A_=None, A_=PILImageResampling.BILINEAR, A_=True, A_=None, A_=True, A_=1 / 255, A_=True, A_=None, A_=None, **A_, ) -> None:
        """Store the preprocessing configuration.

        Apparent parameters — TODO confirm: do_resize, size
        (default shortest_edge=256), resample, do_center_crop, crop_size
        (default 224x224), do_rescale, rescale_factor (1/255), do_normalize,
        image_mean / image_std (ImageNet standard values).
        """
        super().__init__(**A_)
        __lowerCAmelCase : List[Any] = size if size is not None else {'''shortest_edge''': 256}
        __lowerCAmelCase : List[Any] = get_size_dict(A_, default_to_square=A_)
        __lowerCAmelCase : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        __lowerCAmelCase : Optional[Any] = get_size_dict(A_)
        __lowerCAmelCase : List[str] = do_resize
        __lowerCAmelCase : Optional[Any] = size
        __lowerCAmelCase : int = resample
        __lowerCAmelCase : Optional[int] = do_center_crop
        __lowerCAmelCase : Optional[Any] = crop_size
        __lowerCAmelCase : Union[str, Any] = do_rescale
        __lowerCAmelCase : str = rescale_factor
        __lowerCAmelCase : Optional[Any] = do_normalize
        __lowerCAmelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCamelCase__(self, A_, A_, A_=PILImageResampling.BICUBIC, A_=None, **A_, ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``.

        Raises ValueError when the size dict lacks that key.
        """
        __lowerCAmelCase : Optional[Any] = get_size_dict(A_, default_to_square=A_)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        __lowerCAmelCase : Optional[int] = get_resize_output_image_size(A_, size=size['''shortest_edge'''], default_to_square=A_)
        return resize(A_, size=A_, resample=A_, data_format=A_, **A_)

    def UpperCamelCase__(self, A_, A_, A_=None, **A_, ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        __lowerCAmelCase : str = get_size_dict(A_)
        return center_crop(A_, size=(size['''height'''], size['''width''']), data_format=A_, **A_)

    def UpperCamelCase__(self, A_, A_, A_=None, **A_) -> np.ndarray:
        """Multiply pixel values by a scale factor (e.g. 1/255)."""
        return rescale(A_, scale=A_, data_format=A_, **A_)

    def UpperCamelCase__(self, A_, A_, A_, A_=None, **A_, ) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(A_, mean=A_, std=A_, data_format=A_, **A_)

    def UpperCamelCase__(self, A_, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=None, A_=ChannelDimension.FIRST, **A_, ) -> Tuple:
        """Run the full preprocessing pipeline over one or more images.

        Each per-call argument overrides the corresponding stored default;
        the result is a ``BatchFeature`` holding ``pixel_values``.
        """
        __lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase : Dict = size if size is not None else self.size
        __lowerCAmelCase : str = get_size_dict(A_, default_to_square=A_)
        __lowerCAmelCase : str = resample if resample is not None else self.resample
        __lowerCAmelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
        __lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
        __lowerCAmelCase : List[str] = get_size_dict(A_)
        __lowerCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase : str = image_std if image_std is not None else self.image_std
        __lowerCAmelCase : Any = make_list_of_images(A_)
        if not valid_images(A_):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        # Each enabled step needs its configuration to be present.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        __lowerCAmelCase : Optional[int] = [to_numpy_array(A_) for image in images]
        if do_resize:
            __lowerCAmelCase : List[str] = [self.resize(image=A_, size=A_, resample=A_) for image in images]
        if do_center_crop:
            __lowerCAmelCase : List[str] = [self.center_crop(image=A_, size=A_) for image in images]
        if do_rescale:
            __lowerCAmelCase : Tuple = [self.rescale(image=A_, scale=A_) for image in images]
        if do_normalize:
            __lowerCAmelCase : Tuple = [self.normalize(image=A_, mean=A_, std=A_) for image in images]
        __lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(A_, A_) for image in images]
        __lowerCAmelCase : List[Any] = {'''pixel_values''': images}
        return BatchFeature(data=A_, tensor_type=A_)
275
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated unit-test module for MgpstrProcessor.
# Every test method was collapsed to `UpperCamelCase__` (later definitions
# shadow earlier ones in the class dict) and locals bind `__lowerCAmelCase`
# while assertions read the real names.  Comments describe apparent intent.
# ---------------------------------------------------------------------------
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class __lowercase(unittest.TestCase):
    # Image-processor class under test (None when vision extras are absent).
    _UpperCamelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def UpperCamelCase__(self) -> str:
        """Delegate to the tester helper for the processor config dict."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__(self) -> str:
        """setUp: write a small vocab file and an image-processor JSON into a tmp dir."""
        __lowerCAmelCase : Tuple = (3, 32, 128)
        __lowerCAmelCase : List[str] = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        __lowerCAmelCase : Optional[int] = dict(zip(A_, range(len(A_))))
        __lowerCAmelCase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(A_) + '''\n''')
        __lowerCAmelCase : Union[str, Any] = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname, A_)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(A_, A_)

    def UpperCamelCase__(self, **A_) -> Tuple:
        """Load the tokenizer back from the tmp dir."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **A_)

    def UpperCamelCase__(self, **A_) -> Tuple:
        """Load the image processor back from the tmp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **A_)

    def UpperCamelCase__(self) -> Optional[int]:
        """tearDown: remove the tmp dir."""
        shutil.rmtree(self.tmpdirname)

    def UpperCamelCase__(self) -> Optional[Any]:
        """Create one random channel-first uint8 image as a PIL Image."""
        __lowerCAmelCase : Tuple = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)
        __lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_, 0, -1))
        return image_input

    def UpperCamelCase__(self) -> Any:
        """save_pretrained / from_pretrained round-trip with defaults."""
        __lowerCAmelCase : Dict = self.get_tokenizer()
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        processor.save_pretrained(self.tmpdirname)
        __lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=A_)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, A_)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, A_)

    def UpperCamelCase__(self) -> Optional[int]:
        """Round-trip with overridden kwargs (special tokens, normalize, padding)."""
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : Union[str, Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        processor.save_pretrained(self.tmpdirname)
        __lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        __lowerCAmelCase : int = self.get_image_processor(do_normalize=A_, padding_value=1.0)
        __lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=A_, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, A_)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, A_)

    def UpperCamelCase__(self) -> List[str]:
        """Processor image output matches the raw image processor output."""
        __lowerCAmelCase : Any = self.get_image_processor()
        __lowerCAmelCase : Optional[Any] = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
        __lowerCAmelCase : Optional[Any] = image_processor(A_, return_tensors='''np''')
        __lowerCAmelCase : Tuple = processor(images=A_, return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def UpperCamelCase__(self) -> str:
        """Processor text output matches the raw tokenizer output."""
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : Any = '''test'''
        __lowerCAmelCase : Dict = processor(text=A_)
        __lowerCAmelCase : str = tokenizer(A_)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def UpperCamelCase__(self) -> Optional[int]:
        """Joint text+image call yields pixel_values and labels; empty call raises."""
        __lowerCAmelCase : Dict = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : List[Any] = '''test'''
        __lowerCAmelCase : int = self.prepare_image_inputs()
        __lowerCAmelCase : int = processor(text=A_, images=A_)
        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''labels'''])
        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def UpperCamelCase__(self) -> Union[str, Any]:
        """char_decode matches batch_decode with spaces stripped."""
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : int = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase : Optional[int] = processor.char_decode(A_)
        __lowerCAmelCase : Tuple = tokenizer.batch_decode(A_)
        __lowerCAmelCase : Any = [seq.replace(''' ''', '''''') for seq in decoded_tok]
        self.assertListEqual(A_, A_)

    def UpperCamelCase__(self) -> Any:
        """Image-only call (text=None) yields the processor's model_input_names."""
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
        __lowerCAmelCase : List[Any] = processor(text=A_, images=A_)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def UpperCamelCase__(self) -> List[str]:
        """batch_decode over char/bpe/wp logits returns the expected result keys."""
        __lowerCAmelCase : List[str] = self.get_image_processor()
        __lowerCAmelCase : List[str] = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase : List[Any] = torch.randn(1, 27, 38)
        __lowerCAmelCase : Optional[int] = torch.randn(1, 27, 5_0257)
        __lowerCAmelCase : Optional[Any] = torch.randn(1, 27, 3_0522)
        __lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''])
275
1
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated terminal key-capture module (apparently
# accelerate's menu keymap helper).  Constants are all assigned to
# `_UpperCamelCase` (each shadowing the previous) while later code reads the
# real names (KEYMAP, ARROW_KEY_FLAG, WIN_CH_BUFFER, WIN_KEYMAP), and locals
# bind `__lowerCAmelCase` but are read as `ch`, `cha`, `char`, `fd`, ...
# Comments describe apparent intent; this module is raw-terminal I/O and is
# not unit-testable in isolation.
# ---------------------------------------------------------------------------
import os
import string
import sys

# Flag OR-ed onto arrow key codes so they don't collide with printable chars.
_UpperCamelCase = 1 << 8
# Logical key name -> key code.
_UpperCamelCase = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
# Apparent originals: KEYMAP["arrow_begin"] / KEYMAP["arrow_end"] — TODO confirm.
_UpperCamelCase = KEYMAP["up"]
_UpperCamelCase = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of already-translated keystrokes, and the raw Windows
    # two-byte arrow sequences mapped to un-flagged arrow codes.
    _UpperCamelCase = []
    _UpperCamelCase = {
        B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Map digit characters to their codes (apparent original: KEYMAP[str(i)]).
for i in range(10):
    _UpperCamelCase = ord(str(i))


def _lowercase():
    """Read one raw keypress from the terminal (apparently `get_raw_chars`).

    Windows: uses msvcrt, translating two-byte prefixed sequences via
    WIN_KEYMAP and buffering multi-char translations in WIN_CH_BUFFER.
    POSIX: switches stdin to raw mode with tty/termios, reads a single
    character, and always restores the terminal settings.
    """
    if os.name == "nt":
        import msvcrt

        __lowerCAmelCase : Tuple = '''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(lowercase__) == 0:
            # Read the keystroke
            __lowerCAmelCase : str = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                __lowerCAmelCase : List[str] = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    __lowerCAmelCase : Union[str, Any] = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int''']))
                    WIN_CH_BUFFER.append(lowercase__)
                    # NOTE(review): `- 1 << 9` binds as `(x - 1) << 9` in
                    # Python — confirm that precedence is intended.
                    if ord(lowercase__) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_2_6))
                    __lowerCAmelCase : str = chr(KEYMAP['''esc'''])
                except KeyError:
                    __lowerCAmelCase : List[str] = cha[1]
            else:
                __lowerCAmelCase : Union[str, Any] = ch.decode(lowercase__)
        else:
            # Drain a previously buffered translated character first.
            __lowerCAmelCase : Optional[Any] = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        __lowerCAmelCase : Optional[int] = sys.stdin.fileno()
        __lowerCAmelCase : Tuple = termios.tcgetattr(lowercase__)
        try:
            tty.setraw(lowercase__)
            __lowerCAmelCase : int = sys.stdin.read(1)
        finally:
            # Always restore cooked mode, even if the read raised.
            termios.tcsetattr(lowercase__, termios.TCSADRAIN, lowercase__)
    return ch


def _lowercase():
    """Read one logical key (apparently `get_character`).

    Returns interrupt/newline as-is; decodes ESC [ <code> arrow sequences
    into flagged arrow codes; passes printable characters through and maps
    anything else to KEYMAP["undefined"].
    """
    __lowerCAmelCase : Tuple = get_raw_chars()
    if ord(lowercase__) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(lowercase__) == KEYMAP["esc"]:
        __lowerCAmelCase : Tuple = get_raw_chars()
        if ord(lowercase__) == KEYMAP["mod_int"]:
            __lowerCAmelCase : int = get_raw_chars()
            if ord(lowercase__) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(lowercase__) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
275
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated diffusers PNDM pipeline tests.  Both test
# classes are named `__lowercase` (the second shadows the first at module
# scope) and all methods are `UpperCamelCase__`; locals bind
# `__lowerCAmelCase` while assertions read real names (`model`, `pndm`,
# `image`, ...).  Comments describe apparent intent.
# ---------------------------------------------------------------------------
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class __lowercase(unittest.TestCase):
    @property
    def UpperCamelCase__(self) -> Tuple:
        """Build a tiny deterministic dummy UNet for fast testing."""
        torch.manual_seed(0)
        __lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def UpperCamelCase__(self) -> int:
        """Fast inference test: dict vs tuple returns agree and match a
        hard-coded slice of expected pixel values."""
        __lowerCAmelCase : List[str] = self.dummy_uncond_unet
        __lowerCAmelCase : Any = PNDMScheduler()
        __lowerCAmelCase : Dict = PNDMPipeline(unet=A_, scheduler=A_)
        pndm.to(A_)
        pndm.set_progress_bar_config(disable=A_)
        # Identical seeds so both calls produce the same sample.
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0)
        __lowerCAmelCase : Any = pndm(generator=A_, num_inference_steps=20, output_type='''numpy''').images
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0)
        __lowerCAmelCase : List[Any] = pndm(generator=A_, num_inference_steps=20, output_type='''numpy''', return_dict=A_)[0]
        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class __lowercase(unittest.TestCase):
    def UpperCamelCase__(self) -> Optional[Any]:
        """Slow integration test against the pretrained ddpm-cifar10-32 UNet."""
        __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_)
        __lowerCAmelCase : int = PNDMScheduler()
        __lowerCAmelCase : Any = PNDMPipeline(unet=A_, scheduler=A_)
        pndm.to(A_)
        pndm.set_progress_bar_config(disable=A_)
        __lowerCAmelCase : Tuple = torch.manual_seed(0)
        __lowerCAmelCase : Any = pndm(generator=A_, output_type='''numpy''').images
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
275
1
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase

# FIX(review): in the previous revision all three functions were named
# `_lowercase` (each definition shadowing the previous one) while the call
# sites referenced `calculate_prob`, `analyze_text` and `main`, locals were
# assigned to `__lowerCAmelCase` but read under their real names, and the
# non-existent `math.loga` was called.  The referenced names are restored
# below (`math.log2` for the entropy terms); the printed output format is
# unchanged.


def calculate_prob(text: str) -> None:
    """Print first-order entropy, second-order entropy, and their difference.

    Only the space character and ASCII lowercase letters contribute; any
    other character counted by :func:`analyze_text` is ignored here.
    Each value is printed rounded to the nearest integer as `X.0`.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`.

    Returns `(single_char_strings, two_char_strings)` as Counters.  The last
    character is counted up front and a leading `" " + text[0]` pair is added
    so every character participates in exactly one single count and one pair.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module's doctests; the sample text below shows intended usage."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
275
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated genetic string-evolution module.  The five
# functions (apparently evaluate / crossover / mutate / select / basic) are
# all named `_lowercase`, shadowing one another, while internal call sites
# reference the real names; constants N_POPULATION / N_SELECTED /
# MUTATION_PROBABILITY are all assigned to `_UpperCamelCase`; locals bind
# `__lowerCAmelCase` but are read under real names.  Docstrings describe the
# apparent intent — confirm against the original source.
# ---------------------------------------------------------------------------
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
_UpperCamelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_UpperCamelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_UpperCamelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def _lowercase(lowercase__, lowercase__):
    """Apparently `evaluate(item, main_target)`: fitness = number of
    positions where the candidate matches the target."""
    __lowerCAmelCase : List[str] = len([g for position, g in enumerate(lowercase__) if g == main_target[position]])
    return (item, float(lowercase__))


def _lowercase(lowercase__, lowercase__):
    """Apparently `crossover(parent_1, parent_2)`: single random cut point,
    swap tails, return both children."""
    __lowerCAmelCase : str = random.randint(0, len(lowercase__) - 1)
    __lowerCAmelCase : int = parent_a[:random_slice] + parent_a[random_slice:]
    __lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
    return (child_a, child_a)


def _lowercase(lowercase__, lowercase__):
    """Apparently `mutate(child, genes)`: with MUTATION_PROBABILITY replace
    one position with a random gene."""
    __lowerCAmelCase : List[str] = list(lowercase__)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        __lowerCAmelCase : int = random.choice(lowercase__)
    return "".join(lowercase__)


def _lowercase(lowercase__, lowercase__, lowercase__, ):
    """Apparently `select(parent_1, population_score, genes)`: breed up to 10
    children (more for fitter parents), mutate each, return the new pool."""
    __lowerCAmelCase : str = []
    # Generate more children proportionally to the fitness score.
    __lowerCAmelCase : str = int(parent_a[1] * 1_0_0) + 1
    __lowerCAmelCase : Optional[Any] = 1_0 if child_n >= 1_0 else child_n
    for _ in range(lowercase__):
        # NOTE(review): the randint upper bound was lost in obfuscation;
        # upstream uses N_SELECTED, which can exceed the list length — verify.
        __lowerCAmelCase : List[Any] = population_score[random.randint(0, lowercase__)][0]
        __lowerCAmelCase, __lowerCAmelCase : Dict = crossover(parent_a[0], lowercase__)
        # Append new string to the population list.
        pop.append(mutate(lowercase__, lowercase__))
        pop.append(mutate(lowercase__, lowercase__))
    return pop


def _lowercase(lowercase__, lowercase__, lowercase__=True):
    """Apparently `basic(target, genes, debug)`: evolve random strings until
    one matches `target`; returns (generation, total_population, best)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        __lowerCAmelCase : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(lowercase__)
    # Verify that the target contains no genes besides the ones inside genes variable.
    __lowerCAmelCase : Any = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        __lowerCAmelCase : List[str] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(lowercase__)

    # Generate random starting population.
    __lowerCAmelCase : List[Any] = []
    for _ in range(lowercase__):
        population.append(''''''.join([random.choice(lowercase__) for i in range(len(lowercase__))]))

    # Just some logs to know what the algorithms is doing.
    __lowerCAmelCase, __lowerCAmelCase : Tuple = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(lowercase__)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        __lowerCAmelCase : Any = [evaluate(lowercase__, lowercase__) for item in population]

        # Check if there is a matching evolution.
        __lowerCAmelCase : Union[str, Any] = sorted(lowercase__, key=lambda lowercase__: x[1], reverse=lowercase__)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}"""
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        __lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(lowercase__)
        # Normalize population score to be between 0 and 1.
        __lowerCAmelCase : List[Any] = [
            (item, score / len(lowercase__)) for item, score in population_score
        ]

        # This is selection
        for i in range(lowercase__):
            population.extend(select(population_score[int(lowercase__)], lowercase__, lowercase__))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle.  If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(lowercase__) > N_POPULATION:
                break


if __name__ == "__main__":
    _UpperCamelCase = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    _UpperCamelCase = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = basic(target_str, genes_list)
    print(
        F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
275
1
from __future__ import annotations

# FIX(review): in the previous revision every module constant was assigned to
# `_UpperCamelCase` (each shadowing the previous) and all three functions were
# named `_lowercase`, while the call sites referenced `abc`, the rotor/
# reflector constants, `_plugboard`, `_validator` and `enigma`; locals were
# bound to `__lowerCAmelCase` but read under their real names, and the first
# rotor-position error message was missing its closing parenthesis.  The
# referenced names are restored below; the cipher logic is unchanged.

# Type aliases: three 1-indexed rotor start positions / three rotor wirings.
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric, fixed-point-free mapping — makes the whole cipher an involution.
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q",
    "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G",
    "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X",
    "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor selection/positions and build the plugboard map.

    Raises Exception for fewer than 3 unique rotors, ValueError for a rotor
    position outside 1..26.  Returns (rotpos, rotsel, plugboard_dict).
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Turn a string of letter pairs (e.g. "AB CD") into a symmetric map.

    Raises TypeError for a non-string, Exception for an odd length or for
    duplicate/unknown symbols.  An empty string yields an empty plugboard.
    """
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher/decipher `text` with an Enigma-style machine.

    The machine is self-reciprocal: running the output back through with the
    same settings returns the (uppercased) input.  Characters outside A-Z
    pass through unchanged and do not advance the rotors.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    # 0-index the 1-based positions.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (inverse pass, rc -> rb -> ra)
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style stepping —
            # NOTE(review): nesting was lost in the obfuscated source; this
            # follows the upstream reference implementation)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #       'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE(review): the demo's rotor digits were lost in obfuscation; this
    # follows the upstream reference selection.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
275
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "spiece.model"} _UpperCamelCase = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } _UpperCamelCase = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase : int = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __lowerCAmelCase : Union[str, Any] = '''None''' # Default definitions 
for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token __lowerCAmelCase : int = eos_token if bos_token is None else bos_token else: __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __lowerCAmelCase : Union[str, Any] = do_lower_case __lowerCAmelCase : Union[str, Any] = remove_space __lowerCAmelCase : int = keep_accents __lowerCAmelCase : Union[str, Any] = vocab_file __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCAmelCase : int = re.compile( f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.__dict__.copy() __lowerCAmelCase : List[Any] = None return state def __setstate__( self , A_ ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase : List[Any] = {} __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ ) # Normalize whitespaces __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ ) return text def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' return self.sp_model.PieceToId(A_ ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def UpperCamelCase__ ( A_ ) ->str: '''simple docstring''' return out_string def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : str = [] __lowerCAmelCase : Tuple = '''''' __lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in 
self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(A_ ) __lowerCAmelCase : str = False out_string += self.sp_model.decode(A_ ) return out_string def UpperCamelCase__ ( self ) ->Dict[str, int]: '''simple docstring''' __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(A_ , A_ ): __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ ) __lowerCAmelCase : Dict = self.sp_model.encode(A_ ) else: __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text] __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": __lowerCAmelCase : Tuple = torch.tensor(A_ ) return token_ids def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.decode(A_ ) def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple 
docstring''' __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCAmelCase : Any = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
275
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : int = 1 __lowerCAmelCase : Optional[int] = 3 __lowerCAmelCase : Dict = (32, 32) __lowerCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ ) return image @property def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : int = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : int = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def UpperCamelCase__ ( self ) ->List[Any]: 
'''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) return CLIPTextModel(A_ ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Tuple = self.dummy_cond_unet_upscale __lowerCAmelCase : List[Any] = DDPMScheduler() __lowerCAmelCase : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' ) __lowerCAmelCase : Optional[Any] = self.dummy_vae __lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder __lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowerCAmelCase : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase : Optional[Any] = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __lowerCAmelCase : str = StableDiffusionUpscalePipeline( unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , ) __lowerCAmelCase : Optional[int] = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Tuple = '''A painting of a squirrel eating a burger''' __lowerCAmelCase : int = torch.Generator(device=A_ ).manual_seed(0 ) __lowerCAmelCase : List[str] = sd_pipe( [prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __lowerCAmelCase : Any = output.images __lowerCAmelCase : Any = torch.Generator(device=A_ ).manual_seed(0 ) __lowerCAmelCase : str = sd_pipe( [prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , 
)[0] __lowerCAmelCase : int = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) __lowerCAmelCase : Any = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Union[str, Any] = self.dummy_cond_unet_upscale __lowerCAmelCase : List[str] = DDPMScheduler() __lowerCAmelCase : List[str] = DDIMScheduler(prediction_type='''v_prediction''' ) __lowerCAmelCase : int = self.dummy_vae __lowerCAmelCase : Any = self.dummy_text_encoder __lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowerCAmelCase : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase : Dict = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __lowerCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline( unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , ) __lowerCAmelCase : str = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : int = '''A painting of a squirrel eating a burger''' __lowerCAmelCase : List[str] = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __lowerCAmelCase : int = output.images assert image.shape[0] == 2 __lowerCAmelCase : List[str] = torch.Generator(device=A_ ).manual_seed(0 ) __lowerCAmelCase : int = sd_pipe( 
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __lowerCAmelCase : Union[str, Any] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Dict = self.dummy_cond_unet_upscale __lowerCAmelCase : Any = DDPMScheduler() __lowerCAmelCase : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' ) __lowerCAmelCase : List[Any] = self.dummy_vae __lowerCAmelCase : Dict = self.dummy_text_encoder __lowerCAmelCase : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __lowerCAmelCase : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase : int = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 __lowerCAmelCase : Any = unet.half() __lowerCAmelCase : str = text_encoder.half() # make sure here that pndm scheduler skips prk __lowerCAmelCase : Dict = StableDiffusionUpscalePipeline( unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=350 , ) __lowerCAmelCase : List[Any] = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : str = '''A painting of a squirrel eating a burger''' __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) __lowerCAmelCase : Dict = sd_pipe( [prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , ).images __lowerCAmelCase : List[Any] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->Optional[Any]: 
'''simple docstring''' __lowerCAmelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __lowerCAmelCase : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat.npy''' ) __lowerCAmelCase : Dict = '''stabilityai/stable-diffusion-x4-upscaler''' __lowerCAmelCase : Dict = StableDiffusionUpscalePipeline.from_pretrained(A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() __lowerCAmelCase : Union[str, Any] = '''a cat sitting on a park bench''' __lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) __lowerCAmelCase : List[str] = pipe( prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , ) __lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __lowerCAmelCase : Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat_fp16.npy''' ) __lowerCAmelCase : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler''' __lowerCAmelCase : int = StableDiffusionUpscalePipeline.from_pretrained( A_ , torch_dtype=torch.floataa , ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() __lowerCAmelCase : List[Any] = '''a cat sitting on a park bench''' __lowerCAmelCase : Optional[int] = torch.manual_seed(0 ) __lowerCAmelCase : List[Any] = pipe( prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , ) __lowerCAmelCase : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 
def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __lowerCAmelCase : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler''' __lowerCAmelCase : List[str] = StableDiffusionUpscalePipeline.from_pretrained( A_ , torch_dtype=torch.floataa , ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCAmelCase : str = '''a cat sitting on a park bench''' __lowerCAmelCase : int = torch.manual_seed(0 ) __lowerCAmelCase : int = pipe( prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type='''np''' , ) __lowerCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
275
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    """Configuration class for the Table Transformer (DETR-style) model.

    Stores the encoder/decoder sizes, backbone choice, and the Hungarian
    matcher / loss coefficients used during training. Instantiating with the
    defaults yields a configuration similar to
    ``microsoft/table-transformer-detection``.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # A timm backbone and an HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rehydrate a plain dict (e.g. loaded from JSON) into a config object.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Mirrors encoder depth for generic PretrainedConfig consumers.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by generic model code; maps to the encoder head count."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by generic model code; maps to the model dimension."""
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the export: batch is always dynamic; for
        # pixel_values the spatial dimensions are dynamic as well.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance when validating the exported graph against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
275
1
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate considered when searching for a primitive root.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a (probable) primitive root modulo the prime ``p_val``.

    Candidates are drawn at random from [3, p_val); a candidate g is rejected
    when pow(g, 2, p_val) == 1 or pow(g, p_val, p_val) == 1.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns ``(public_key, private_key)`` where
    ``public_key = (key_size, g, g^d mod p inverse component, p)`` and
    ``private_key = (key_size, d)``.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
275
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# Shared RNG so helpers are reproducible unless a caller supplies its own.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D ``shape``."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Holds the common hyper-parameters and input builders for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step so the batch spans lengths from min_seq_length to max_seq_length.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return kwargs suitable for constructing a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of speech inputs, equal-length or increasing in size."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        """Round-trip through save_pretrained/from_pretrained preserves config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """Round-trip through to_json_file/from_json_file preserves config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # NOTE(review): original mangled dtypes restored from upstream — float64 in,
        # float32 out of pad() for both numpy and torch tensors.
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Load the first ``num_samples`` audio arrays from the dummy LibriSpeech set."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
275
1
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": _UpperCamelCase = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") _UpperCamelCase = F"https://www.google.com/search?q={query}&num=100" _UpperCamelCase = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, ) try: _UpperCamelCase = ( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "yuRUbf"}) .find("a") .get("href") ) except AttributeError: _UpperCamelCase = parse_qs( BeautifulSoup(res.text, "html.parser") .find("div", attrs={"class": "kCrYT"}) .find("a") .get("href") )["url"][0] webbrowser.open(link)
275
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } _UpperCamelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def _lowercase ( lowercase__ ): __lowerCAmelCase : List[str] = {} with open(lowercase__ , '''r''' ) as file: for line_number, line in enumerate(lowercase__ ): __lowerCAmelCase : Any = line.strip() if line: __lowerCAmelCase : Dict = line.split() __lowerCAmelCase : str = line_number __lowerCAmelCase : List[str] = words[0] __lowerCAmelCase : Any = value return result def _lowercase ( lowercase__ , 
lowercase__ , lowercase__ , lowercase__ , lowercase__ ): for attribute in key.split('''.''' ): __lowerCAmelCase : List[Any] = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : List[Any] = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : str = getattr(lowercase__ , lowercase__ ).shape elif weight_type is not None and weight_type == "param": __lowerCAmelCase : Dict = hf_pointer for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : str = shape_pointer.shape # let's reduce dimension __lowerCAmelCase : Any = value[0] else: __lowerCAmelCase : str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[str] = value elif weight_type == "weight_v": __lowerCAmelCase : int = value elif weight_type == "bias": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Tuple = value else: __lowerCAmelCase : Any = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : int = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : Tuple = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCAmelCase : List[str] = '''.'''.join([key, hf_param_name] ) else: __lowerCAmelCase : Optional[int] = key __lowerCAmelCase : Union[str, Any] = value if '''lm_head''' in full_key else value[0] _UpperCamelCase = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): __lowerCAmelCase : Any = False for key, mapped_key in MAPPING.items(): __lowerCAmelCase : Tuple = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCAmelCase : Optional[Any] = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(lowercase__ )[0].split('''.''' )[-2] __lowerCAmelCase : Dict = mapped_key.replace('''*''' , lowercase__ ) if "weight_g" in name: __lowerCAmelCase : List[Any] = '''weight_g''' elif "weight_v" in name: __lowerCAmelCase : List[Any] = '''weight_v''' elif "bias" in name: __lowerCAmelCase : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : int = '''weight''' else: __lowerCAmelCase : Any = None if hf_dict is not None: rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , 
lowercase__ ) return is_used return is_used def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Optional[Any] = fairseq_model.state_dict() __lowerCAmelCase : Tuple = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Any = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCAmelCase : int = True else: __lowerCAmelCase : Dict = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ ) if not is_used: unused_weights.append(lowercase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = full_name.split('''conv_layers.''' )[-1] __lowerCAmelCase : List[str] = name.split('''.''' ) __lowerCAmelCase : Any = int(items[0] ) __lowerCAmelCase : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise 
ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False ): if config_path is not None: __lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(lowercase__ ) else: __lowerCAmelCase : Optional[int] = WavaVecaConfig() if is_seq_class: __lowerCAmelCase : Optional[Any] = read_txt_into_dict(lowercase__ ) __lowerCAmelCase : int = idalabel __lowerCAmelCase : Optional[int] = WavaVecaForSequenceClassification(lowercase__ ) __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) feature_extractor.save_pretrained(lowercase__ ) elif is_finetuned: if dict_path: __lowerCAmelCase : List[str] = Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCAmelCase : List[Any] = target_dict.pad_index __lowerCAmelCase : List[Any] = target_dict.bos_index __lowerCAmelCase : Optional[int] = target_dict.eos_index __lowerCAmelCase : Any = len(target_dict.symbols ) __lowerCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''vocab.json''' ) if not 
os.path.isdir(lowercase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) __lowerCAmelCase : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : int = 1 with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) __lowerCAmelCase : Dict = WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , ) __lowerCAmelCase : List[str] = True if config.feat_extract_norm == '''layer''' else False __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) __lowerCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __lowerCAmelCase : str = WavaVecaForCTC(lowercase__ ) else: __lowerCAmelCase : Any = WavaVecaForPreTraining(lowercase__ ) if is_finetuned or is_seq_class: __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCAmelCase : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) __lowerCAmelCase : str = fairseq.tasks.setup_task(lowercase__ ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ ) __lowerCAmelCase : int = model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowercase__ ) if __name__ == "__main__": 
_UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
275
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } _UpperCamelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def _lowercase ( lowercase__ ): __lowerCAmelCase : List[str] = {} with open(lowercase__ , '''r''' ) as file: for line_number, line in enumerate(lowercase__ ): __lowerCAmelCase : Any = line.strip() if line: __lowerCAmelCase : Dict = line.split() __lowerCAmelCase : str = line_number __lowerCAmelCase : List[str] = words[0] __lowerCAmelCase : Any = value return result def _lowercase ( lowercase__ , 
lowercase__ , lowercase__ , lowercase__ , lowercase__ ): for attribute in key.split('''.''' ): __lowerCAmelCase : List[Any] = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : List[Any] = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : str = getattr(lowercase__ , lowercase__ ).shape elif weight_type is not None and weight_type == "param": __lowerCAmelCase : Dict = hf_pointer for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : str = shape_pointer.shape # let's reduce dimension __lowerCAmelCase : Any = value[0] else: __lowerCAmelCase : str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[str] = value elif weight_type == "weight_v": __lowerCAmelCase : int = value elif weight_type == "bias": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Tuple = value else: __lowerCAmelCase : Any = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : int = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : Tuple = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCAmelCase : List[str] = '''.'''.join([key, hf_param_name] ) else: __lowerCAmelCase : Optional[int] = key __lowerCAmelCase : Union[str, Any] = value if '''lm_head''' in full_key else value[0] _UpperCamelCase = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): __lowerCAmelCase : Any = False for key, mapped_key in MAPPING.items(): __lowerCAmelCase : Tuple = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCAmelCase : Optional[Any] = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(lowercase__ )[0].split('''.''' )[-2] __lowerCAmelCase : Dict = mapped_key.replace('''*''' , lowercase__ ) if "weight_g" in name: __lowerCAmelCase : List[Any] = '''weight_g''' elif "weight_v" in name: __lowerCAmelCase : List[Any] = '''weight_v''' elif "bias" in name: __lowerCAmelCase : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : int = '''weight''' else: __lowerCAmelCase : Any = None if hf_dict is not None: rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , 
lowercase__ ) return is_used return is_used def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Optional[Any] = fairseq_model.state_dict() __lowerCAmelCase : Tuple = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Any = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCAmelCase : int = True else: __lowerCAmelCase : Dict = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ ) if not is_used: unused_weights.append(lowercase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = full_name.split('''conv_layers.''' )[-1] __lowerCAmelCase : List[str] = name.split('''.''' ) __lowerCAmelCase : Any = int(items[0] ) __lowerCAmelCase : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise 
ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False ): if config_path is not None: __lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(lowercase__ ) else: __lowerCAmelCase : Optional[int] = WavaVecaConfig() if is_seq_class: __lowerCAmelCase : Optional[Any] = read_txt_into_dict(lowercase__ ) __lowerCAmelCase : int = idalabel __lowerCAmelCase : Optional[int] = WavaVecaForSequenceClassification(lowercase__ ) __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) feature_extractor.save_pretrained(lowercase__ ) elif is_finetuned: if dict_path: __lowerCAmelCase : List[str] = Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCAmelCase : List[Any] = target_dict.pad_index __lowerCAmelCase : List[Any] = target_dict.bos_index __lowerCAmelCase : Optional[int] = target_dict.eos_index __lowerCAmelCase : Any = len(target_dict.symbols ) __lowerCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''vocab.json''' ) if not 
os.path.isdir(lowercase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) __lowerCAmelCase : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : int = 1 with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) __lowerCAmelCase : Dict = WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , ) __lowerCAmelCase : List[str] = True if config.feat_extract_norm == '''layer''' else False __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) __lowerCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __lowerCAmelCase : str = WavaVecaForCTC(lowercase__ ) else: __lowerCAmelCase : Any = WavaVecaForPreTraining(lowercase__ ) if is_finetuned or is_seq_class: __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCAmelCase : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) __lowerCAmelCase : str = fairseq.tasks.setup_task(lowercase__ ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ ) __lowerCAmelCase : int = model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowercase__ ) if __name__ == "__main__": 
_UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
275
from ....configuration_utils import PretrainedConfig from ....utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """trajectory_transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int: '''simple docstring''' __lowerCAmelCase : Any = vocab_size __lowerCAmelCase : Tuple = action_weight __lowerCAmelCase : Tuple = reward_weight __lowerCAmelCase : Union[str, Any] = value_weight __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : str = block_size __lowerCAmelCase : Optional[Any] = action_dim __lowerCAmelCase : Union[str, Any] = observation_dim __lowerCAmelCase : Union[str, Any] = transition_dim __lowerCAmelCase : Dict = learning_rate __lowerCAmelCase : Any = n_layer __lowerCAmelCase : Any = n_head __lowerCAmelCase : Optional[int] = n_embd __lowerCAmelCase : str = embd_pdrop __lowerCAmelCase : Dict = attn_pdrop __lowerCAmelCase : Optional[int] = resid_pdrop __lowerCAmelCase : Union[str, Any] = initializer_range __lowerCAmelCase : Optional[int] = layer_norm_eps __lowerCAmelCase : Any = kaiming_initializer_range __lowerCAmelCase : List[str] = use_cache super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
275
1
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowercase (unittest.TestCase ): @property def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : List[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : List[str] = self.dummy_uncond_unet __lowerCAmelCase : Any = PNDMScheduler() __lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) __lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) __lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0] __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32''' __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ ) __lowerCAmelCase : int = PNDMScheduler() __lowerCAmelCase : Any = PNDMPipeline(unet=A_ 
, scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Tuple = torch.manual_seed(0 ) __lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
275
def _lowercase ( lowercase__ , lowercase__ ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCAmelCase : int = str(bin(lowercase__ ) )[2:] # remove the leading "0b" __lowerCAmelCase : Any = str(bin(lowercase__ ) )[2:] __lowerCAmelCase : List[str] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
275
1
# NOTE(review): machine-anonymized conversion script — assignment targets were
# rewritten to `__lowerCAmelCase` while later references keep the original
# names (`xmod`, `model`, `config`, ...), `_UpperCamelCase` is bound three
# times (logger / sample text / sample language), and the conversion function
# declares duplicate parameter names (`lowercase__`), which is a SyntaxError.
# Comments describe intended behavior only; restore names before running.
import argparse
from pathlib import Path

import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version

from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging

if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = "Hello, World!"  # sample text used for the output check
_UpperCamelCase = "en_XX"  # sample language adapter id


def _lowercase ( lowercase__ , lowercase__ , lowercase__ ):
    """Convert a fairseq X-MOD checkpoint into a transformers Xmod model.

    Intended parameters (per the body): the fairseq checkpoint path, the
    output folder, and a flag selecting a classification head vs. an LM head.
    Copies every weight tensor layer-by-layer, then verifies that both models
    produce (near-)identical outputs on a sample sentence before saving.
    """
    __lowerCAmelCase : Union[str, Any] = Path('''data_bin''' )
    __lowerCAmelCase : str = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(lowercase__ ).parent ) , checkpoint_file=Path(lowercase__ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(lowercase__ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(lowercase__ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval()  # disable dropout
    print(lowercase__ )
    __lowerCAmelCase : Dict = xmod.model.encoder.sentence_encoder
    # Build the HF config from the fairseq model's hyperparameters.
    __lowerCAmelCase : int = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        __lowerCAmelCase : Any = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , lowercase__ )
    __lowerCAmelCase : Any = XmodForSequenceClassification(lowercase__ ) if classification_head else XmodForMaskedLM(lowercase__ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    __lowerCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
    __lowerCAmelCase : Tuple = xmod_sent_encoder.embed_positions.weight
    __lowerCAmelCase : List[str] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    __lowerCAmelCase : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
    __lowerCAmelCase : Dict = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        __lowerCAmelCase : List[str] = model.roberta.encoder.layer[i]
        __lowerCAmelCase : Tuple = xmod_sent_encoder.layers[i]
        # self attention
        __lowerCAmelCase : Any = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        __lowerCAmelCase : List[str] = xmod_layer.self_attn.q_proj.weight
        __lowerCAmelCase : int = xmod_layer.self_attn.q_proj.bias
        __lowerCAmelCase : Optional[int] = xmod_layer.self_attn.k_proj.weight
        __lowerCAmelCase : List[str] = xmod_layer.self_attn.k_proj.bias
        __lowerCAmelCase : Optional[int] = xmod_layer.self_attn.v_proj.weight
        __lowerCAmelCase : List[Any] = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        __lowerCAmelCase : Tuple = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        __lowerCAmelCase : Dict = xmod_layer.self_attn.out_proj.weight
        __lowerCAmelCase : Optional[int] = xmod_layer.self_attn.out_proj.bias
        __lowerCAmelCase : Optional[int] = xmod_layer.self_attn_layer_norm.weight
        __lowerCAmelCase : Tuple = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        __lowerCAmelCase : List[Any] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        __lowerCAmelCase : Tuple = xmod_layer.fca.weight
        __lowerCAmelCase : Optional[int] = xmod_layer.fca.bias
        # output
        __lowerCAmelCase : Tuple = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        __lowerCAmelCase : Dict = xmod_layer.fca.weight
        __lowerCAmelCase : str = xmod_layer.fca.bias
        __lowerCAmelCase : str = xmod_layer.final_layer_norm.weight
        __lowerCAmelCase : List[Any] = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            __lowerCAmelCase : Tuple = xmod_layer.adapter_layer_norm.weight
            __lowerCAmelCase : int = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            __lowerCAmelCase : List[str] = bert_output.adapter_modules[lang_code]
            __lowerCAmelCase : Optional[int] = xmod_layer.adapter_modules[lang_code]
            __lowerCAmelCase : Tuple = from_adapter.fca.weight
            __lowerCAmelCase : Dict = from_adapter.fca.bias
            __lowerCAmelCase : Tuple = from_adapter.fca.weight
            __lowerCAmelCase : Optional[int] = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        __lowerCAmelCase : List[Any] = xmod_sent_encoder.layer_norm.weight
        __lowerCAmelCase : Dict = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        __lowerCAmelCase : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
        __lowerCAmelCase : Optional[Any] = xmod.model.classification_heads['''mnli'''].dense.bias
        __lowerCAmelCase : Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight
        __lowerCAmelCase : Optional[int] = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        __lowerCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.dense.weight
        __lowerCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.dense.bias
        __lowerCAmelCase : int = xmod.model.encoder.lm_head.layer_norm.weight
        __lowerCAmelCase : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
        __lowerCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.weight
        __lowerCAmelCase : Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    __lowerCAmelCase : Any = xmod.encode(lowercase__ ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(lowercase__ )
    __lowerCAmelCase : Any = model(lowercase__ )[0]
    if classification_head:
        __lowerCAmelCase : List[Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(lowercase__ ) )
    else:
        # NOTE(review): SAMPLE_LANGUAGE is undefined — presumably the third
        # module-level `_UpperCamelCase` binding ("en_XX") was this constant.
        __lowerCAmelCase : List[str] = xmod.model(lowercase__ , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    __lowerCAmelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    __lowerCAmelCase : Tuple = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(lowercase__ ).mkdir(parents=lowercase__ , exist_ok=lowercase__ )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(lowercase__ )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    # NOTE(review): `parser`, `args` and `convert_xmod_checkpoint_to_pytorch`
    # are undefined under the anonymized names — verify before running.
    _UpperCamelCase = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
275
# NOTE(review): machine-anonymized DPT image processor — assignment targets
# were rewritten to `__lowerCAmelCase`, method parameters to `A_`, the base
# class to `_UpperCAmelCase`, and the resize helper below to `_lowercase`
# (still called as `get_resize_output_image_size`).  The helper also declares
# duplicate parameter names (`lowercase__`), a SyntaxError.  Comments describe
# intended behavior; restore names before running.
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_torch_available():
    import torch

if is_vision_available():
    import PIL

_UpperCamelCase = logging.get_logger(__name__)


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Compute the (height, width) to resize to, rounded to a multiple.

    Intended parameters (per the body): the input image, the target output
    size, a keep-aspect-ratio flag, and the multiple to round to.
    """
    def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ):
        # Round to the nearest multiple, but never above max_val / below min_val.
        __lowerCAmelCase : int = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            __lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple
        if x < min_val:
            __lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple
        return x

    __lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size
    __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ )
    __lowerCAmelCase, __lowerCAmelCase : int = output_size
    # determine new height and width
    __lowerCAmelCase : Optional[Any] = output_height / input_height
    __lowerCAmelCase : List[Any] = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            __lowerCAmelCase : str = scale_width
        else:
            # fit height
            __lowerCAmelCase : str = scale_height
    __lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ )
    __lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ )
    return (new_height, new_width)


class __lowercase (_UpperCAmelCase ):
    # NOTE(review): every method below is named `UpperCamelCase__`, so each
    # definition shadows the previous one — presumably these were resize /
    # rescale / normalize / preprocess / post_process_semantic_segmentation.
    _UpperCamelCase = ["""pixel_values"""]

    def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None:
        """Store the DPT preprocessing defaults (resize/rescale/normalize settings)."""
        super().__init__(**A_ )
        __lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
        __lowerCAmelCase : Dict = get_size_dict(A_ )
        __lowerCAmelCase : Optional[Any] = do_resize
        __lowerCAmelCase : int = size
        __lowerCAmelCase : Dict = keep_aspect_ratio
        __lowerCAmelCase : List[Any] = ensure_multiple_of
        __lowerCAmelCase : Tuple = resample
        __lowerCAmelCase : Dict = do_rescale
        __lowerCAmelCase : Any = rescale_factor
        __lowerCAmelCase : List[Any] = do_normalize
        __lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray:
        """Resize an image to `size`, optionally keeping aspect ratio and rounding to a multiple."""
        __lowerCAmelCase : int = get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(
            A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , )
        return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(A_ , scale=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )

    def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image:
        """Full preprocessing pipeline: resize, rescale, normalize, batch into tensors."""
        __lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
        __lowerCAmelCase : Optional[int] = size if size is not None else self.size
        __lowerCAmelCase : Union[str, Any] = get_size_dict(A_ )
        __lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        __lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        __lowerCAmelCase : Tuple = resample if resample is not None else self.resample
        __lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        __lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        __lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
        __lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
        __lowerCAmelCase : Optional[Any] = make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images]
        if do_resize:
            __lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
        if do_rescale:
            __lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
        if do_normalize:
            __lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
        __lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
        __lowerCAmelCase : Dict = {'''pixel_values''': images}
        return BatchFeature(data=A_ , tensor_type=A_ )

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any:
        """Convert model logits into per-image semantic segmentation maps,
        optionally upsampled to the provided target sizes."""
        __lowerCAmelCase : Any = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A_ ) != len(A_ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(A_ ):
                __lowerCAmelCase : Optional[int] = target_sizes.numpy()
            __lowerCAmelCase : List[str] = []
            for idx in range(len(A_ ) ):
                __lowerCAmelCase : Any = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
                __lowerCAmelCase : str = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A_ )
        else:
            __lowerCAmelCase : Any = logits.argmax(dim=1 )
            __lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
275
1
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase, __lowerCAmelCase : Dict = len(lowercase__ ), len(grid[0] ) if ( min(lowercase__ , lowercase__ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) __lowerCAmelCase : Any = 0 count += depth_first_search(lowercase__ , row + 1 , lowercase__ , lowercase__ ) count += depth_first_search(lowercase__ , row - 1 , lowercase__ , lowercase__ ) count += depth_first_search(lowercase__ , lowercase__ , col + 1 , lowercase__ ) count += depth_first_search(lowercase__ , lowercase__ , col - 1 , lowercase__ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
275
# NOTE(review): anonymized unit tests for the local `.lib` linear-algebra
# module.  Every method below is named `UpperCamelCase__`, so each definition
# shadows the previous one and unittest discovers none of them (test method
# names must start with "test") — restore the original names before relying
# on this suite.
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __lowercase (unittest.TestCase ):

    def UpperCamelCase__ ( self ) ->None:
        """Vector.component returns stored entries; Vector() builds an empty vector."""
        __lowerCAmelCase : List[str] = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        __lowerCAmelCase : Dict = Vector()

    def UpperCamelCase__ ( self ) ->None:
        """str(Vector) renders as a comma-separated tuple."""
        __lowerCAmelCase : Any = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(A_ ) , '''(0,0,0,0,0,1)''' )

    def UpperCamelCase__ ( self ) ->None:
        """len(Vector) is its dimension."""
        __lowerCAmelCase : Union[str, Any] = Vector([1, 2, 3, 4] )
        self.assertEqual(len(A_ ) , 4 )

    def UpperCamelCase__ ( self ) ->None:
        """euclidean_length matches the 2-norm for several vectors."""
        __lowerCAmelCase : Dict = Vector([1, 2] )
        __lowerCAmelCase : Optional[int] = Vector([1, 2, 3, 4, 5] )
        __lowerCAmelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        __lowerCAmelCase : str = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )

    def UpperCamelCase__ ( self ) ->None:
        """Vector addition is component-wise."""
        __lowerCAmelCase : Tuple = Vector([1, 2, 3] )
        __lowerCAmelCase : List[str] = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def UpperCamelCase__ ( self ) ->None:
        """Vector subtraction is component-wise."""
        __lowerCAmelCase : Any = Vector([1, 2, 3] )
        __lowerCAmelCase : List[str] = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def UpperCamelCase__ ( self ) ->None:
        """`*` is scalar multiplication for numbers and dot product for vectors."""
        __lowerCAmelCase : str = Vector([1, 2, 3] )
        __lowerCAmelCase : List[Any] = Vector([2, -1, 4] )  # for test of dot product
        __lowerCAmelCase : Optional[int] = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
        self.assertEqual((a * b) , 0 )

    def UpperCamelCase__ ( self ) ->None:
        """zero_vector(n) has n zero components."""
        self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )

    def UpperCamelCase__ ( self ) ->None:
        """unit_basis_vector(n, i) is the i-th standard basis vector."""
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )

    def UpperCamelCase__ ( self ) ->None:
        """axpy(s, x, y) computes s*x + y."""
        __lowerCAmelCase : str = Vector([1, 2, 3] )
        __lowerCAmelCase : Any = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , A_ , A_ ) ) , '''(3,4,7)''' )

    def UpperCamelCase__ ( self ) ->None:
        """copy() produces an equal vector."""
        __lowerCAmelCase : Tuple = Vector([1, 0, 0, 0, 0, 0] )
        __lowerCAmelCase : Optional[Any] = x.copy()
        self.assertEqual(str(A_ ) , str(A_ ) )

    def UpperCamelCase__ ( self ) ->None:
        """change_component mutates a single entry in place."""
        __lowerCAmelCase : List[str] = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(A_ ) , '''(0,1,0)''' )

    def UpperCamelCase__ ( self ) ->None:
        """str(Matrix) renders rows between pipes."""
        __lowerCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(A_ ) )

    def UpperCamelCase__ ( self ) ->None:
        """minor(x, y) matches the precomputed minors matrix."""
        __lowerCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __lowerCAmelCase : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(A_ , A_ ) )

    def UpperCamelCase__ ( self ) ->None:
        """cofactor(x, y) matches the precomputed cofactor matrix."""
        __lowerCAmelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __lowerCAmelCase : Tuple = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(A_ , A_ ) )

    def UpperCamelCase__ ( self ) ->None:
        """determinant() of the sample matrix is -5."""
        __lowerCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def UpperCamelCase__ ( self ) ->None:
        """`*` is matrix-vector product and scalar multiplication."""
        __lowerCAmelCase : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        __lowerCAmelCase : Union[str, Any] = Vector([1, 2, 3] )
        self.assertEqual('''(14,32,50)''' , str(a * x ) )
        self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )

    def UpperCamelCase__ ( self ) ->None:
        """change_component mutates one matrix entry in place."""
        __lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(A_ ) )

    def UpperCamelCase__ ( self ) ->None:
        """component(x, y) reads a single entry."""
        # NOTE(review): assertEqual's third argument is a failure *message*,
        # not a tolerance — presumably assertAlmostEqual was intended.
        __lowerCAmelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )

    def UpperCamelCase__ ( self ) ->None:
        """Matrix addition is entry-wise."""
        __lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __lowerCAmelCase : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )

    def UpperCamelCase__ ( self ) ->None:
        """Matrix subtraction is entry-wise."""
        __lowerCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        __lowerCAmelCase : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )

    def UpperCamelCase__ ( self ) ->None:
        """square_zero_matrix(n) is an n-by-n matrix of zeros."""
        self.assertEqual(
            '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )


if __name__ == "__main__":
    unittest.main()
275
1
def _lowercase ( lowercase__ , lowercase__ ): return number | (1 << position) def _lowercase ( lowercase__ , lowercase__ ): return number & ~(1 << position) def _lowercase ( lowercase__ , lowercase__ ): return number ^ (1 << position) def _lowercase ( lowercase__ , lowercase__ ): return ((number >> position) & 1) == 1 def _lowercase ( lowercase__ , lowercase__ ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
275
def _lowercase ( lowercase__ , lowercase__ ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
275
1
# NOTE(review): machine-anonymized BLOOM Megatron->transformers conversion
# script — assignment targets were rewritten to `__lowerCAmelCase` while later
# references keep the original names, `_UpperCamelCase` is bound several
# times (the two lists below are WEIGHTS_TO_AVERAGE_ENDSWITH and
# WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN, per the call sites), and the first
# two functions declare duplicate / wrongly-renamed parameters (a SyntaxError
# in the first case).  Comments describe intended behavior only.
import argparse
import json
import os
import re

import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging

logging.set_verbosity_info()

# Weights that are averaged across tensor-parallel (TP) ranks.
_UpperCamelCase = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Weights that are concatenated along dim 1 (row-parallel) instead of dim 0.
_UpperCamelCase = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def _lowercase ( lowercase__ , lowercase__ ):
    """Map a Megatron checkpoint key to its transformers state-dict name.

    Non-layer keys go through the rename table; transformer-block keys get a
    ``h.<layer>.`` prefix (the stored layer index is offset by 3).
    """
    __lowerCAmelCase : Any = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    __lowerCAmelCase : str = int(re.match(r'''.*layer_(\d*).*''' , lowercase__ )[1] )
    layer_number -= 3
    return f"""h.{layer_number}.""" + key


def _lowercase ( lowercase__ ):
    """Return the size in bytes of one element of the given torch dtype
    (bool counts as 1/8 byte); parses the bit width out of the dtype name."""
    if dtype == torch.bool:
        return 1 / 8
    __lowerCAmelCase : Dict = re.search(r'''[^\d](\d+)$''' , str(lowercase__ ) )
    if bit_search is None:
        raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
    __lowerCAmelCase : Dict = int(bit_search.groups()[0] )
    return bit_size // 8


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Convert a sharded Megatron BLOOM checkpoint to transformers format.

    Intended parameters (per the body): checkpoint path, config json path,
    output folder, whether to emit a sharded checkpoint, and the pretraining
    tensor-parallel degree.  In sharded mode each layer file is merged across
    TP ranks and written out with an index json; otherwise the merged weights
    are loaded into a BloomModel and saved as a single checkpoint.
    """
    # Construct model
    if bloom_config_file == "":
        __lowerCAmelCase : Tuple = BloomConfig()
    else:
        __lowerCAmelCase : List[Any] = BloomConfig.from_json_file(lowercase__ )
    if shard_model:
        __lowerCAmelCase : Union[str, Any] = os.listdir(lowercase__ )
        __lowerCAmelCase : Optional[int] = sorted(filter(lambda lowercase__ : s.startswith('''layer''' ) and "model_00" in s , lowercase__ ) )
        __lowerCAmelCase : str = {'''weight_map''': {}, '''metadata''': {}}
        __lowerCAmelCase : Optional[Any] = 0
        __lowerCAmelCase : int = None
        __lowerCAmelCase : List[Any] = BloomConfig()
        for j, file in enumerate(lowercase__ ):
            print('''Processing file: {}'''.format(lowercase__ ) )
            __lowerCAmelCase : Union[str, Any] = None
            for i in range(lowercase__ ):
                # load all TP files
                __lowerCAmelCase : str = file.replace('''model_00''' , f"""model_0{i}""" )
                __lowerCAmelCase : Tuple = torch.load(os.path.join(lowercase__ , lowercase__ ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                __lowerCAmelCase : int = list(temp.keys() )
                for key in keys:
                    __lowerCAmelCase : List[str] = temp.pop(lowercase__ )
                if tensors is None:
                    __lowerCAmelCase : List[Any] = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            __lowerCAmelCase : Optional[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            __lowerCAmelCase : int = torch.cat([tensors[key], temp[key]] , dim=lowercase__ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    __lowerCAmelCase : int = tensors[key] / pretraining_tp
            torch.save(
                lowercase__ , os.path.join(
                    lowercase__ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(lowercase__ ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                __lowerCAmelCase : Union[str, Any] = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    __lowerCAmelCase : int = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(lowercase__ ) ).zfill(5 ) )
        __lowerCAmelCase : Optional[int] = BloomConfig()
        __lowerCAmelCase : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        __lowerCAmelCase : int = total_size
        with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(lowercase__ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
            __lowerCAmelCase : int = json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '''\n'''
            f.write(lowercase__ )
    else:
        __lowerCAmelCase : Dict = BloomModel(lowercase__ )
        __lowerCAmelCase : Optional[Any] = os.listdir(lowercase__ )
        __lowerCAmelCase : Dict = sorted(filter(lambda lowercase__ : s.startswith('''layer''' ) and "model_00" in s , lowercase__ ) )
        __lowerCAmelCase : Union[str, Any] = None
        for i, file in enumerate(lowercase__ ):
            __lowerCAmelCase : List[Any] = None
            for i in range(lowercase__ ):
                # load all TP files
                __lowerCAmelCase : Tuple = file.replace('''model_00''' , f"""model_0{i}""" )
                __lowerCAmelCase : str = torch.load(os.path.join(lowercase__ , lowercase__ ) , map_location='''cpu''' )
                # Rename keys in the transformers names
                __lowerCAmelCase : Optional[Any] = list(temp.keys() )
                for key in keys:
                    __lowerCAmelCase : List[Any] = temp.pop(lowercase__ )
                if tensors is None:
                    __lowerCAmelCase : List[Any] = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            __lowerCAmelCase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            __lowerCAmelCase : List[str] = torch.cat([tensors[key], temp[key]] , dim=lowercase__ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    __lowerCAmelCase : List[Any] = tensors[key] / pretraining_tp
            __lowerCAmelCase : Optional[int] = model.load_state_dict(lowercase__ , strict=lowercase__ )
            assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
            if missing_keys is None:
                __lowerCAmelCase : Dict = set(other_keys.missing_keys )
            else:
                __lowerCAmelCase : List[Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"""The keys {missing_keys} are missing"""
        # Save pytorch-model
        os.makedirs(lowercase__ , exist_ok=lowercase__ )
        __lowerCAmelCase : Any = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        __lowerCAmelCase : str = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
        if config.torch_dtype is not None:
            __lowerCAmelCase : Union[str, Any] = model.to(config.torch_dtype )
        torch.save(model.state_dict() , lowercase__ )
        print(f"""Save configuration file to {pytorch_config_dump_path}""" )
        with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    # NOTE(review): `parser`, `args` and `convert_bloom_checkpoint_to_pytorch`
    # are undefined under the anonymized names — verify before running.
    _UpperCamelCase = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
275
def euclidean_distance_sqr(point1, point2):
    """Return the squared Euclidean distance between two 2-D points."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(points, column=0):
    """Return *points* sorted by the given coordinate column (0 = x, 1 = y).

    Bug fix: the sort key previously read an undefined name instead of its
    own lambda parameter, raising NameError on first use.
    """
    return sorted(points, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force minimum squared distance over the first *points_counts*
    points.  O(n^2); used as the base case of the divide-and-conquer."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the vertical strip around the split
    line.  Only the 6 preceding neighbours of each point need checking
    (classic closest-pair result), hence the bounded inner loop."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the SQUARED distance.

    Both pre-sorted views of the same point set are required so the strip
    step stays linear per level (overall O(n log n)).
    """
    # Base case: brute force is cheapest for tiny inputs.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # Recurse on the two halves.
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Points within closest_pair_dis of the dividing vertical line may form
    # a closer cross-boundary pair; collect them into a strip.
    cross_strip = [
        point
        for point in points_sorted_on_x
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis
    ]
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (non-squared) distance between the closest pair of points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
275
1
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    """Close or nag inactive issues on huggingface/accelerate.

    Bug fixes: the function was unreachable (``__main__`` calls ``main()``
    but no such def existed), the sort-key lambda's parameter did not match
    the name it dereferenced (NameError), and the exempt-label list was
    bound to a different name than the one read below.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(
            [comment for comment in issue.get_comments()],
            key=lambda i: i.created_at,
            reverse=True,
        )
        last_comment = comments[0] if len(comments) > 0 else None
        # NOTE(review): PyGithub timestamps are naive UTC, so naive utcnow()
        # is deliberately kept here to avoid mixing aware/naive datetimes.
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
275
def solution(pence: int = 200) -> int:
    """Count the ways to make *pence* pence from standard UK coins.

    Bottom-up coin-change count (Project Euler problem 31).

    Bug fixes: the def was mangled to a name the module-level assert never
    calls, and the parameter name did not match the ``pence`` read in the
    body (NameError).

    :param pence: target amount in pence; must be non-negative.
    :return: number of distinct coin combinations summing to ``pence``.
    :raises ValueError: if ``pence`` is negative (previously an IndexError).
    """
    if pence < 0:
        raise ValueError("pence must be non-negative")
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence
    for coin in coins:
        # Each amount reachable with this coin inherits the count of the
        # amount that is one coin smaller.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
275
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): for attribute in key.split('''.''' ): __lowerCAmelCase : Union[str, Any] = getattr(lowercase__ , lowercase__ ) if weight_type is not None: __lowerCAmelCase : int = getattr(lowercase__ , lowercase__ ).shape else: __lowerCAmelCase : Dict = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase : Optional[int] = value elif weight_type == "weight_g": __lowerCAmelCase : Optional[int] = value elif weight_type == "weight_v": __lowerCAmelCase : List[str] = value elif weight_type == "bias": __lowerCAmelCase : Dict = value else: __lowerCAmelCase : Dict = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : int = [] __lowerCAmelCase : Optional[Any] = fairseq_model.state_dict() __lowerCAmelCase : Any = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : int = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCAmelCase : int = True else: for key, mapped_key in MAPPING.items(): __lowerCAmelCase : str = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCAmelCase : Union[str, Any] = True if "*" in mapped_key: __lowerCAmelCase : int = name.split(lowercase__ )[0].split('''.''' )[-2] __lowerCAmelCase : List[Any] = mapped_key.replace('''*''' , lowercase__ ) if "weight_g" in name: __lowerCAmelCase : Optional[int] = '''weight_g''' elif "weight_v" in name: __lowerCAmelCase : Union[str, Any] = '''weight_v''' elif "weight" in name: __lowerCAmelCase : Any = '''weight''' elif "bias" in name: __lowerCAmelCase : Any = '''bias''' else: __lowerCAmelCase : Optional[Any] = None set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) continue if not is_used: unused_weights.append(lowercase__ ) logger.warning(f"""Unused 
weights: {unused_weights}""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Tuple = full_name.split('''conv_layers.''' )[-1] __lowerCAmelCase : Dict = name.split('''.''' ) __lowerCAmelCase : Any = int(items[0] ) __lowerCAmelCase : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowerCAmelCase : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase__ ) def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : int = SEWConfig() if is_finetuned: __lowerCAmelCase : Optional[int] = model.wav_encoder.wav_model.cfg else: __lowerCAmelCase : List[str] = model.cfg __lowerCAmelCase : Dict = fs_config.conv_bias __lowerCAmelCase : Any = eval(fs_config.conv_feature_layers ) __lowerCAmelCase : Optional[Any] = [x[0] for x in conv_layers] __lowerCAmelCase : int = [x[1] for x in conv_layers] __lowerCAmelCase : List[Any] = [x[2] for x in conv_layers] __lowerCAmelCase : str = '''gelu''' __lowerCAmelCase : List[Any] = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' __lowerCAmelCase : Any = 0.0 __lowerCAmelCase : str = fs_config.activation_fn.name __lowerCAmelCase : Tuple = fs_config.encoder_embed_dim __lowerCAmelCase : List[Any] = 0.0_2 __lowerCAmelCase : List[Any] = fs_config.encoder_ffn_embed_dim __lowerCAmelCase : int = 1E-5 __lowerCAmelCase : List[str] = fs_config.encoder_layerdrop __lowerCAmelCase : Tuple = fs_config.encoder_attention_heads __lowerCAmelCase : Union[str, Any] = fs_config.conv_pos_groups __lowerCAmelCase : int = fs_config.conv_pos __lowerCAmelCase : Union[str, Any] = len(lowercase__ ) __lowerCAmelCase : Optional[int] = fs_config.encoder_layers __lowerCAmelCase : List[str] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __lowerCAmelCase : Dict = model.cfg 
__lowerCAmelCase : List[Any] = fs_config.final_dropout __lowerCAmelCase : List[str] = fs_config.layerdrop __lowerCAmelCase : Optional[Any] = fs_config.activation_dropout __lowerCAmelCase : Optional[int] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __lowerCAmelCase : List[str] = fs_config.attention_dropout __lowerCAmelCase : List[Any] = fs_config.dropout_input __lowerCAmelCase : List[str] = fs_config.dropout __lowerCAmelCase : Dict = fs_config.mask_channel_length __lowerCAmelCase : Optional[Any] = fs_config.mask_channel_prob __lowerCAmelCase : str = fs_config.mask_length __lowerCAmelCase : Dict = fs_config.mask_prob __lowerCAmelCase : Optional[int] = '''Wav2Vec2FeatureExtractor''' __lowerCAmelCase : Optional[Any] = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True ): if is_finetuned: __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __lowerCAmelCase : Union[str, Any] = SEWConfig.from_pretrained(lowercase__ ) else: __lowerCAmelCase : Tuple = convert_config(model[0] , lowercase__ ) __lowerCAmelCase : Optional[int] = model[0].eval() __lowerCAmelCase : Dict = True if config.feat_extract_norm == '''layer''' else False __lowerCAmelCase : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) if is_finetuned: if dict_path: __lowerCAmelCase : Optional[int] = Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCAmelCase : Any = 
target_dict.pad_index __lowerCAmelCase : Tuple = target_dict.bos_index __lowerCAmelCase : Any = target_dict.pad_index __lowerCAmelCase : Optional[Any] = target_dict.bos_index __lowerCAmelCase : Dict = target_dict.eos_index __lowerCAmelCase : List[str] = len(target_dict.symbols ) __lowerCAmelCase : Any = os.path.join(lowercase__ , '''vocab.json''' ) if not os.path.isdir(lowercase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , lowercase__ ) __lowerCAmelCase : Dict = WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , ) __lowerCAmelCase : Any = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __lowerCAmelCase : Optional[int] = SEWForCTC(lowercase__ ) else: __lowerCAmelCase : Optional[Any] = SEWModel(lowercase__ ) feature_extractor.save_pretrained(lowercase__ ) recursively_load_weights(lowercase__ , lowercase__ , lowercase__ ) hf_model.save_pretrained(lowercase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase = parser.parse_args() 
convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
275
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ConsistencyModelPipeline _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _UpperCamelCase = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[str] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def UpperCamelCase__ ( self , A_=False ) ->Dict: '''simple docstring''' if class_cond: __lowerCAmelCase : List[str] = self.dummy_cond_unet else: __lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Dict = { '''unet''': unet, '''scheduler''': scheduler, } return components def UpperCamelCase__ ( self , A_ , A_=0 ) ->Tuple: '''simple docstring''' 
if str(A_ ).startswith('''mps''' ): __lowerCAmelCase : str = torch.manual_seed(A_ ) else: __lowerCAmelCase : Dict = torch.Generator(device=A_ ).manual_seed(A_ ) __lowerCAmelCase : Tuple = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Tuple = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : List[str] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_dummy_inputs(A_ ) __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] __lowerCAmelCase : str = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : str = self.get_dummy_components(class_cond=A_ ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : List[Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(A_ ) __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] __lowerCAmelCase : List[str] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Any = 
'''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Union[str, Any] = self.get_dummy_components() __lowerCAmelCase : List[Any] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : int = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Tuple = self.get_dummy_inputs(A_ ) __lowerCAmelCase : Any = 1 __lowerCAmelCase : List[Any] = None __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : Any = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Optional[Any] = self.get_dummy_components(class_cond=A_ ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ ) __lowerCAmelCase : Union[str, Any] = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_dummy_inputs(A_ ) __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Dict = None __lowerCAmelCase : Tuple = 0 __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] __lowerCAmelCase : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->str: '''simple docstring''' __lowerCAmelCase : Dict = torch.manual_seed(A_ ) __lowerCAmelCase : Tuple = { 
'''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase : List[str] = self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ ) __lowerCAmelCase : Union[str, Any] = latents return inputs def UpperCamelCase__ ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->Optional[int]: '''simple docstring''' if type(A_ ) == str: __lowerCAmelCase : int = torch.device(A_ ) __lowerCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ ) __lowerCAmelCase : Union[str, Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ ) return latents def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : str = self.get_inputs() __lowerCAmelCase : Any = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Dict = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : Optional[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ ) 
pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : List[Any] = self.get_inputs() __lowerCAmelCase : Tuple = 1 __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : str = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1] __lowerCAmelCase : List[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) pipe.to(torch_device=A_ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=A_ , device=A_ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ): __lowerCAmelCase : Dict = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Dict = image[0, -3:, -3:, -1] __lowerCAmelCase : Optional[int] = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , ) __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ ) 
pipe.to(torch_device=A_ , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=A_ ) __lowerCAmelCase : Union[str, Any] = self.get_inputs(get_fixed_latents=A_ , device=A_ ) __lowerCAmelCase : Any = 1 __lowerCAmelCase : int = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ): __lowerCAmelCase : int = pipe(**A_ ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : str = image[0, -3:, -3:, -1] __lowerCAmelCase : Any = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
275
1
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : List[str] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __lowerCAmelCase : Dict = Vector() def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(A_ ) , '''(0,0,0,0,0,1)''' ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = Vector([1, 2, 3, 4] ) self.assertEqual(len(A_ ) , 4 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Dict = Vector([1, 2] ) __lowerCAmelCase : Optional[int] = Vector([1, 2, 3, 4, 5] ) __lowerCAmelCase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __lowerCAmelCase : str = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Tuple = Vector([1, 2, 3] ) __lowerCAmelCase : List[str] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Any = Vector([1, 2, 3] ) __lowerCAmelCase : List[str] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : str = Vector([1, 2, 3] ) __lowerCAmelCase : List[Any] = Vector([2, -1, 4] ) # for test of dot product __lowerCAmelCase : 
Optional[int] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : str = Vector([1, 2, 3] ) __lowerCAmelCase : Any = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , A_ , A_ ) ) , '''(3,4,7)''' ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Tuple = Vector([1, 0, 0, 0, 0, 0] ) __lowerCAmelCase : Optional[Any] = x.copy() self.assertEqual(str(A_ ) , str(A_ ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : List[str] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(A_ ) , '''(0,1,0)''' ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(A_ ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCAmelCase : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(A_ , A_ ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCAmelCase : Tuple = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(A_ , A_ ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
self.assertEqual(-5 , a.determinant() ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __lowerCAmelCase : Union[str, Any] = Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(A_ ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCAmelCase : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' __lowerCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __lowerCAmelCase : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def UpperCamelCase__ ( self ) ->None: '''simple docstring''' self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
275
from collections import deque

from .hash_table import HashTable


class __lowercase(_UpperCAmelCase):
    """Hash table whose slots hold deques ("linked-list" buckets), so each
    slot can accumulate up to ``charge_factor`` values before a collision
    is re-resolved by the base class.

    NOTE(review): the original text declared duplicate ``A_`` parameters
    (a SyntaxError) and assigned results to throwaway names; parameter and
    target names below are reconstructed from the read sites. The three
    methods share one (mangled) name and shadow each other — upstream they
    were distinct (`_set_value` / `balanced_factor` / `_collision_resolution`);
    confirm against the original file.
    """

    def __init__(self, *args, **kwargs) -> None:
        # All sizing / charge-factor setup lives in the base hash table.
        super().__init__(*args, **kwargs)

    def UpperCamelCase__(self, key, data) -> None:
        """Store ``data`` in slot ``key``, prepending to the slot's deque
        (the deque is created lazily on first use)."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # NOTE(review): original discarded this assignment; upstream mirrors
        # the bucket into the key registry — confirm the target attribute.
        self._keys[key] = self.values[key]

    def UpperCamelCase__(self) -> float:
        """Average remaining capacity per slot, scaled by the charge factor."""
        # Was ``len(A_)`` with ``A_`` unbound; the generator variable is ``slot``.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def UpperCamelCase__(self, key, data=None):
        """Keep probing at ``key`` while its bucket has room or the table
        still has an empty slot; otherwise defer to the base resolution."""
        if not (
            # NOTE(review): ``count(None)`` reconstructed from upstream —
            # the original's ``count(A_)`` argument was unbound.
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
275
1
from ..utils import DummyObject, requires_backends


class __lowercase(metaclass=_UpperCAmelCase):
    """Import-time placeholder for a class that needs the ``note_seq``
    backend: constructing it (or calling its loader classmethods) raises a
    clear "please install note_seq" error via ``requires_backends`` instead
    of failing at import time.

    NOTE(review): the original declared ``*A_, **A_`` (duplicate argument
    name — a SyntaxError) and annotated returns with typing names that are
    never imported; both repaired below. The two classmethods share one
    mangled name and shadow each other (upstream: ``from_config`` /
    ``from_pretrained``).
    """

    # Backends the real implementation requires.
    _UpperCamelCase = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def UpperCamelCase__(cls, *args, **kwargs):
        # Raises the informative missing-backend error for loader calls.
        requires_backends(cls, ["note_seq"])

    @classmethod
    def UpperCamelCase__(cls, *args, **kwargs):
        # Same guard for the second (shadowing) loader entry point.
        requires_backends(cls, ["note_seq"])
275
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Optional[Any] = global_rng __lowerCAmelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = parent __lowerCAmelCase : Optional[int] = batch_size __lowerCAmelCase : Any = min_seq_length __lowerCAmelCase : Tuple = max_seq_length __lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Dict = feature_size __lowerCAmelCase : Optional[int] = padding_value __lowerCAmelCase : Tuple = sampling_rate __lowerCAmelCase : Union[str, Any] = return_attention_mask __lowerCAmelCase : Dict = do_normalize def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Union[str, Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __lowerCAmelCase : Tuple = [ _flatten(floats_list((x, self.feature_size) ) ) for x in 
range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Tuple = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WavaVecaFeatureExtractor def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self ) def UpperCamelCase__ ( self , A_ ) ->Optional[Any]: '''simple docstring''' self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input __lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : List[Any] = np.asarray(A_ ) __lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : str = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 ) __lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths] __lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : List[str] = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ ) __lowerCAmelCase : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) 
self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : List[str] = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' ) __lowerCAmelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : int = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) __lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Optional[int] = feat_extract( A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to 
longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' import torch __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa ) __lowerCAmelCase : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def UpperCamelCase__ ( self ) ->int: '''simple docstring''' for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: __lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ ) __lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
275
1
import datasets

from .evaluate import evaluate


# NOTE(review): the original rebound a single ``_UpperCamelCase`` name three
# times and then referenced ``_CITATION``/``_DESCRIPTION``/``_KWARGS_DESCRIPTION``
# (all unbound). The three constants are restored under the names the
# decorator and MetricInfo actually read; string contents are unchanged.
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"

_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"

_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase(datasets.Metric):
    def UpperCamelCase__(self):
        """Declare the metric's input schema, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def UpperCamelCase__(self, predictions, references):
        """Score ``predictions`` against ``references`` with the official
        CUAD evaluator and return its metrics dict.

        NOTE(review): the original signature declared two parameters both
        named ``A_`` (a SyntaxError); names restored from the body's reads.
        """
        # id -> candidate answer texts, the shape the scorer expects.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-wrap the flat references into the SQuAD-style nested layout.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
275
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase (_UpperCAmelCase ): def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=False , A_=True , A_="None" , A_=3 , A_=4 , A_=None , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : List[str] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : List[Any] = is_training __lowerCAmelCase : List[Any] = use_input_mask __lowerCAmelCase : Optional[int] = use_token_type_ids __lowerCAmelCase : Tuple = use_labels __lowerCAmelCase : str = vocab_size __lowerCAmelCase : int = hidden_size __lowerCAmelCase : Any = num_hidden_layers __lowerCAmelCase : Any = num_attention_heads __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : int = hidden_act __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : Union[str, Any] = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : Optional[int] = initializer_range __lowerCAmelCase : int = num_labels __lowerCAmelCase : int = num_choices __lowerCAmelCase : List[str] = relative_attention __lowerCAmelCase : Union[str, Any] = 
position_biased_input __lowerCAmelCase : int = pos_att_type __lowerCAmelCase : List[Any] = scope def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : int = None if self.use_input_mask: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCAmelCase : List[str] = None if self.use_token_type_ids: __lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : int = None __lowerCAmelCase : List[str] = None if self.use_labels: __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.get_config() __lowerCAmelCase : Dict = 300 return config def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]: '''simple 
docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any: '''simple docstring''' __lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0] __lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0] __lowerCAmelCase : List[str] = model(A_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int: '''simple docstring''' __lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any: '''simple docstring''' __lowerCAmelCase : Any = self.num_labels __lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = self.num_labels __lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ ) model.to(A_ 
) model.eval() __lowerCAmelCase : int = model( A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Any = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : Tuple = config_and_inputs __lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) _UpperCamelCase = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase = True _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = DebertaModelTester(self ) __lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*A_ ) @slow def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase (unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' pass @slow def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0] # compare the actual values for a slice. 
__lowerCAmelCase : Optional[Any] = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
275
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names, consumed lazily by ``_LazyModule``.
# NOTE(review): the original rebound one ``_UpperCamelCase`` name three times
# and then referenced an undefined ``_import_structure``; restored to the
# standard transformers lazy-import pattern.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first
    # attribute access. NOTE(review): the original assigned the proxy to a
    # throwaway name instead of ``sys.modules[__name__]``, which would have
    # made the lazy machinery a no-op.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def _lowercase(protein):
    """Build the atom14 <-> atom37 index and existence tables for ``protein``
    and store them in the protein dict, which is also returned.

    NOTE(review): the original discarded every intermediate into one
    throwaway name (reusing it for two different tables), used the
    nonexistent dtypes ``torch.intaa``/``torch.floataa``, and never wrote
    the computed tables into ``protein``. The wiring below is reconstructed
    from the read sites and the AlphaFold/OpenFold reference implementation;
    confirm the output dict keys against callers.
    """
    restype_atomaa_to_atomaa = []  # [21][14]: atom14 slot -> atom37 index
    restype_atomaa_to_atomaa_inv = []  # [21][37]: atom37 slot -> atom14 index
    restype_atomaa_mask = []  # [21][14]: 1.0 where the atom14 slot is a real atom

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_to_atomaa_inv.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]
        )
        restype_atomaa_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa.append([0] * 14)
    restype_atomaa_to_atomaa_inv.append([0] * 37)
    restype_atomaa_mask.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa, dtype=torch.int32, device=device)
    restype_atomaa_to_atomaa_inv = torch.tensor(restype_atomaa_to_atomaa_inv, dtype=torch.int32, device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # Per-residue (num_res, 14) tables: (residx, atom14) -> atom37 index.
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()

    # Gather indices for mapping back from atom37 to atom14.
    residx_atomaa_to_atomaa_inv = restype_atomaa_to_atomaa_inv[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_to_atomaa_inv.long()

    # Per-restype atom37 existence mask, then gathered per residue.
    restype_atomaa_mask_inv = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atomaa_mask_inv[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atomaa_mask_inv[protein_aatype]

    return protein


# Alias so the numpy wrapper below can still reach the tensor version after
# the (name-mangled) second definition rebinds ``_lowercase``; the original
# called ``make_atomaa_masks`` which was never defined.
make_atomaa_masks = _lowercase


def _lowercase(batch):
    """Numpy-in / numpy-out wrapper around ``make_atomaa_masks``."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
275
1
def _lowercase ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowerCAmelCase : int = sorted(string.lower() ) return len(lowercase__ ) == len(set(lowercase__ ) ) if __name__ == "__main__": _UpperCamelCase = input("Enter a string ").strip() _UpperCamelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
def _lowercase ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowerCAmelCase : int = sorted(string.lower() ) return len(lowercase__ ) == len(set(lowercase__ ) ) if __name__ == "__main__": _UpperCamelCase = input("Enter a string ").strip() _UpperCamelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
1
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402

if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


# NOTE(review): this file has been passed through an identifier-scrambling tool.
# Every test method is named `UpperCamelCase__` (later defs shadow earlier ones),
# most assignment targets were replaced by the annotation-only statement
# `__lowerCAmelCase : <type> = ...`, and many arguments became the undefined name
# `A_`. As written the tests cannot run; the docstrings below describe the evident
# intent of each test, to be confirmed against the upstream transformers test file.
class __lowercase (unittest.TestCase ):
    def UpperCamelCase__ ( self ) ->int:
        '''Per-test setup; the original assignment target of the `0` was lost in the renaming pass.'''
        __lowerCAmelCase : Optional[Any] = 0

    @slow
    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Smoke-test AutoTokenizer.from_pretrained over the BERT and GPT-2 checkpoint archive maps.'''
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
            self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(A_ ) , 0 )

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            __lowerCAmelCase : str = AutoTokenizer.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
            self.assertIsInstance(A_ , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(A_ ) , 0 )

    def UpperCamelCase__ ( self ) ->Dict:
        '''Loading a tiny BERT checkpoint yields a Bert(Fast) tokenizer with vocab size 12.'''
        __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Loading a tiny RoBERTa checkpoint yields a Roberta(Fast) tokenizer with vocab size 20.'''
        __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def UpperCamelCase__ ( self ) ->Dict:
        '''Passing an explicit config to from_pretrained still resolves the matching tokenizer class.'''
        __lowerCAmelCase : Dict = AutoConfig.from_pretrained(A_ )
        self.assertIsInstance(A_ , A_ )
        # Check that tokenizer_type ≠ model_type
        __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(A_ , config=A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''from_pretrained with tokenizer_type= works from bare vocab files (slow tokenizers).'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(A_ , '''vocab.txt''' ) )
            __lowerCAmelCase : int = AutoTokenizer.from_pretrained(A_ , tokenizer_type='''bert''' , use_fast=A_ )
            self.assertIsInstance(A_ , A_ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(A_ , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(A_ , '''merges.txt''' ) )
            __lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(A_ , tokenizer_type='''gpt2''' , use_fast=A_ )
            self.assertIsInstance(A_ , A_ )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''from_pretrained with tokenizer_type= works from bare vocab files (fast tokenizers).'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(A_ , '''vocab.txt''' ) )
            __lowerCAmelCase : int = AutoTokenizer.from_pretrained(A_ , tokenizer_type='''bert''' )
            self.assertIsInstance(A_ , A_ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(A_ , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(A_ , '''merges.txt''' ) )
            __lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(A_ , tokenizer_type='''gpt2''' )
            self.assertIsInstance(A_ , A_ )

    def UpperCamelCase__ ( self ) ->Dict:
        '''An unknown tokenizer_type raises.'''
        with pytest.raises(A_ ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->List[str]:
        '''Hub tokenizer configs (do_lower_case, model_max_length) are honored by every entry point.'''
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            __lowerCAmelCase : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )

            if isinstance(A_ , A_ ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , A_ )
            else:
                self.assertEqual(tokenizer.do_lower_case , A_ )

            self.assertEqual(tokenizer.model_max_length , 512 )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->str:
        '''A nonexistent repo id raises with a helpful error message.'''
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                A_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                __lowerCAmelCase : Tuple = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Every tokenizer class name registered in TOKENIZER_MAPPING resolves back to its class.'''
        __lowerCAmelCase : Tuple = TOKENIZER_MAPPING.values()
        __lowerCAmelCase : Dict = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(A_ )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''use_fast toggles between the fast and slow tokenizer classes.'''
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=A_ ) , A_ )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , A_ )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->List[Any]:
        '''A do_lower_case override is forwarded: cased text maps to [UNK] on these vocabs.'''
        __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=A_ )
        __lowerCAmelCase : Optional[Any] = '''Hello, world. How are you?'''
        __lowerCAmelCase : Any = tokenizer.tokenize(A_ )
        self.assertEqual('''[UNK]''' , tokens[0] )

        __lowerCAmelCase : str = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=A_ )
        __lowerCAmelCase : List[str] = tokenizer.tokenize(A_ )
        self.assertEqual('''[UNK]''' , tokens[0] )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->List[str]:
        '''A fast tokenizer configured only via the model config exposes the expected attributes.'''
        __lowerCAmelCase : int = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(A_ ) , A_ )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 3_0000 )
        self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
        self.assertEqual(tokenizer.padding_side , '''right''' )
        self.assertEqual(tokenizer.truncation_side , '''right''' )

    def UpperCamelCase__ ( self ) ->Any:
        '''save_pretrained/from_pretrained round-trip preserves the tokenizer class and vocab size.'''
        __lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ )
        self.assertIsInstance(A_ , (BertTokenizer, BertTokenizerFast) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(A_ )

        self.assertIsInstance(A_ , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''CTRL only ships a slow tokenizer, so AutoTokenizer must fall back to it.'''
        __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(A_ , A_ )

    def UpperCamelCase__ ( self ) ->str:
        '''get_tokenizer_config returns the saved tokenizer config, or an empty dict when absent.'''
        __lowerCAmelCase : Union[str, Any] = get_tokenizer_config('''bert-base-cased''' )
        __lowerCAmelCase : Optional[Any] = config.pop('''_commit_hash''' , A_ )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(A_ , {'''do_lower_case''': False} )

        # This model does not have a tokenizer_config so we get back an empty dict.
        __lowerCAmelCase : Any = get_tokenizer_config(A_ )
        self.assertDictEqual(A_ , {} )

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            __lowerCAmelCase : str = get_tokenizer_config(A_ )

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Registering a custom config/tokenizer pair round-trips through save/load; duplicates raise.'''
        try:
            AutoConfig.register('''custom''' , A_ )
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A_ ):
                AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )

            __lowerCAmelCase : int = CustomTokenizer.from_pretrained(A_ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )

                __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ )
                self.assertIsInstance(A_ , A_ )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->List[str]:
        '''Slow and fast custom tokenizers can be registered separately or together.'''
        try:
            AutoConfig.register('''custom''' , A_ )

            # Can register in two steps
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                A_ , slow_tokenizer_class=A_ , fast_tokenizer_class=A_ )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A_ ):
                AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                __lowerCAmelCase : Tuple = BertTokenizerFast.from_pretrained(A_ )
                bert_tokenizer.save_pretrained(A_ )
                __lowerCAmelCase : Tuple = CustomTokenizerFast.from_pretrained(A_ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )

                __lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(A_ )
                self.assertIsInstance(A_ , A_ )

                __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ , use_fast=A_ )
                self.assertIsInstance(A_ , A_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Remote (trust_remote_code) tokenizers load, reload from disk, and offer a slow variant.'''
        with self.assertRaises(A_ ):
            __lowerCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )

        # If remote code is disabled, we can't load this config.
        with self.assertRaises(A_ ):
            __lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ )

        __lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(A_ )
            __lowerCAmelCase : str = AutoTokenizer.from_pretrained(A_ , trust_remote_code=A_ )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            __lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ , use_fast=A_ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(A_ )
                __lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(A_ , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->Any:
        '''trust_remote_code selects between locally registered and Hub-hosted tokenizer classes.'''
        # NOTE(review): both nested classes were renamed to `__lowercase` and their bases/fields
        # scrambled (`NewTokenizer` is undefined here) — restore from the upstream test file.
        class __lowercase (_UpperCAmelCase ):
            _UpperCamelCase = False

        class __lowercase (_UpperCAmelCase ):
            _UpperCamelCase = NewTokenizer
            _UpperCamelCase = False

        try:
            AutoConfig.register('''custom''' , A_ )
            AutoTokenizer.register(A_ , slow_tokenizer_class=A_ )
            AutoTokenizer.register(A_ , fast_tokenizer_class=A_ )
            # If remote code is not set, the default is to use local
            __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote code is disabled, we load the local one.
            __lowerCAmelCase : str = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )

            # If remote is enabled, we load from the Hub
            __lowerCAmelCase : int = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            __lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=A_ , use_fast=A_ )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def UpperCamelCase__ ( self ) ->Any:
        '''Legacy dynamic-tokenizer repos still load with trust_remote_code.'''
        __lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=A_ )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )

            # Test we can also load the slow version
            __lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=A_ , use_fast=A_ )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''An invalid model identifier raises with a clear message.'''
        with self.assertRaisesRegex(
            A_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''An invalid revision raises with a clear message.'''
        with self.assertRaisesRegex(
            A_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            __lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(A_ , revision='''aaaaaa''' )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''A second from_pretrained is served from cache: one HEAD request, no GETs.'''
        __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
275
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


# NOTE(review): this module has been through an identifier-scrambling pass — class
# names became `__lowercase`, dataclass fields became `_UpperCamelCase`, parameters
# became `A_`, and assignment targets became bare annotations `__lowerCAmelCase : T`.
# The comments below describe the evident intent (a Karras et al. 2022 stochastic
# sampler, cf. diffusers' KarrasVeScheduler); confirm details against upstream.
@dataclass
class __lowercase (_UpperCAmelCase ):
    '''Scheduler step output; fields were presumably prev_sample, derivative and
    pred_original_sample before obfuscation (targets lost — TODO confirm).'''
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = None


class __lowercase (_UpperCAmelCase , _UpperCAmelCase ):
    '''Stochastic churn-based variance-exploding scheduler (Karras-style sampler).'''

    # Solver order: step() plus step_correct() form a 2nd-order (Heun) update.
    _UpperCamelCase = 2

    @register_to_config
    def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int:
        '''Store config and initialize setable state.

        NOTE(review): parameter names were scrambled to `A_`; the body reads
        `sigma_max`, and later code reads config keys sigma_min/sigma_max/s_churn/
        s_min/s_max/s_noise — presumably the original signature. TODO confirm.
        '''
        __lowerCAmelCase : Optional[int] = sigma_max

        # setable values
        __lowerCAmelCase : int = None  # num_inference_steps, set by set_timesteps
        __lowerCAmelCase : np.IntTensor = None  # timesteps
        __lowerCAmelCase : torch.FloatTensor = None  # sigma(t_i)

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor:
        '''Return the sample unchanged — this scheduler needs no input scaling.'''
        return sample

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
        '''Set the discrete timesteps (descending) and the matching sigma schedule.'''
        __lowerCAmelCase : str = num_inference_steps
        # Timesteps run from num_inference_steps-1 down to 0.
        __lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ )
        # Geometric interpolation between sigma_max^2 and sigma_min^2.
        __lowerCAmelCase : Tuple = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]:
        '''Increase noise to a higher sigma_hat = sigma + gamma * sigma ("churn").'''
        # Only churn inside the configured [s_min, s_max] band; gamma is capped
        # at sqrt(2) - 1 per the Karras sampler.
        if self.config.s_min <= sigma <= self.config.s_max:
            __lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __lowerCAmelCase : List[str] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device )
        __lowerCAmelCase : str = sigma + gamma * sigma
        # Add just enough noise to move the sample from level sigma to sigma_hat.
        __lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        '''Euler step from sigma_hat down to sigma_prev (first-order predictor).'''
        __lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
        __lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
        __lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        '''Heun correction: average the predictor and corrector derivatives.'''
        __lowerCAmelCase : str = sample_prev + sigma_prev * model_output
        __lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
        __lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any:
        '''add_noise is intentionally unsupported for this scheduler.'''
        raise NotImplementedError()
275
1
def _lowercase(lowercase__):
    """Generate the first n terms of the harmonic series as strings.

    Args:
        lowercase__: the term count, given as a string (or anything `int()` accepts).
            An empty string yields an empty list.

    Returns:
        list: ['1', '1/2', '1/3', ...] with one entry per term.
    """
    # Bug fix: the guard previously compared an undefined name `n_term` instead
    # of the parameter.
    if lowercase__ == "":
        return []
    series: list = []
    for temp in range(int(lowercase__)):
        # The first term prints as '1' rather than '1/1'.
        series.append(f"""1/{temp + 1}""" if series else '''1''')
    return series


# Backward-compatible public alias: the CLI below referred to the function as
# `harmonic_series`, which no longer existed after the renaming pass.
harmonic_series = _lowercase

if __name__ == "__main__":
    # Bug fix: bind the name the final print statement actually reads.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
275
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to a fixed length.

    Bug fix: the previous definition had four parameters all named `lowercase__`
    (a SyntaxError) and its assignment targets had been stripped; restored from
    the names the class below calls it with. TODO confirm against the upstream
    LUKE NER example this file mirrors.

    Args:
        sequences: iterable of per-example sequences.
        padding_value: scalar fill value, or a 2-tuple for (seq, 2)-shaped entries.
        padding_side: "right" or "left".
        sequence_length: target padded length.

    Returns:
        Nested Python lists of shape (batch, sequence_length[, 2]).
    """
    if isinstance(padding_value, tuple):
        # Each element is itself a pair (e.g. entity spans), so pad a 3-D tensor.
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(truncated), :2] = truncated
            else:
                out_tensor[i, : len(truncated)] = truncated
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(truncated) - 1 :, :2] = truncated
            else:
                out_tensor[i, len(truncated) - 1 :] = truncated

    return out_tensor.tolist()


def _lowercase(lowercase__):
    """Return True if the single character is punctuation.

    Treats the ASCII ranges 33-47, 58-64, 91-96 and 123-126 as punctuation
    (they are not all in Unicode category "P"), then falls back to the
    Unicode category check.
    """
    cp = ord(lowercase__)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(lowercase__)
    if cat.startswith('''P'''):
        return True
    return False


@dataclass
class __lowercase (DataCollatorMixin):
    """Data collator for LUKE-style token classification.

    Pads token features via the tokenizer, then pads labels, ner_tags and
    original_entity_spans to the padded entity length.

    Bug fixes: the base class reference `_UpperCAmelCase` and all dataclass
    field names had been destroyed by the renaming pass; restored from the
    attributes the method body reads (self.tokenizer, self.padding, ...).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
        """Collate a list of feature dicts into a padded torch batch."""
        import torch

        features = A_
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # If labels are present we pad them ourselves below, so delay tensor
            # conversion until the very end.
            return_tensors='''pt''' if labels is None else None,
        )

        if labels is None:
            return batch

        # Pad labels to the (already padded) entity sequence length.
        sequence_length = torch.tensor(batch['''entity_ids''']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
275
1
import requests
from bs4 import BeautifulSoup  # bug fix: was `from bsa import ...` (typo)


def get_citation(base_url, params):
    """Fetch a Google Scholar lookup page and return its "Cited by ..." text.

    Bug fixes: the previous definition had two parameters both named
    `lowercase__` (a SyntaxError) and the CLI below called the then-undefined
    name `get_citation`; names restored to match the call sites.

    Args:
        base_url: the scholar_lookup endpoint.
        params: query parameters identifying the article.

    Returns:
        str: the text of the third anchor in the result footer (the
        "Cited by N" link on a standard result page).
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()


# Backward-compatible alias for the scrambled name the def previously carried.
_lowercase = get_citation

if __name__ == "__main__":
    # Bug fix: the dict was previously assigned to `_UpperCamelCase` while the
    # call below read the undefined name `params`.
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
275
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


# NOTE(review): this test file has been through an identifier-scrambling pass —
# every method is named `UpperCamelCase__` (later defs shadow earlier ones), most
# assignment targets became bare annotations `__lowerCAmelCase : T`, and many
# arguments became the undefined name `A_`. The docstrings below record the
# evident intent of each test; restoring runnable code needs the upstream file.
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
    # Image-processor class under test (None when vision deps are missing).
    _UpperCamelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def UpperCamelCase__ ( self ) ->str:
        '''Image-processor kwargs; delegates to self.image_processor_tester, which is
        never assigned in this file — presumably provided by a mixin upstream (TODO confirm).'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self ) ->str:
        '''Write a character vocab and an image-processor config into a temp dir.'''
        __lowerCAmelCase : Tuple = (3, 32, 128)
        __lowerCAmelCase : List[str] = tempfile.mkdtemp()

        # fmt: off
        __lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        __lowerCAmelCase : Optional[int] = dict(zip(A_ , range(len(A_ ) ) ) )

        __lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A_ ) + '''\n''' )

        __lowerCAmelCase : Union[str, Any] = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(A_ , A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        '''Load the tokenizer saved in the temp dir, forwarding kwargs.'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        '''Load the image processor saved in the temp dir, forwarding kwargs.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Remove the temp dir after each test.'''
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Build a random 3x30x400 PIL image for processor inputs.'''
        __lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
        __lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) )

        return image_input

    def UpperCamelCase__ ( self ) ->Any:
        '''save_pretrained/from_pretrained round-trip preserves tokenizer vocab and processor config.'''
        __lowerCAmelCase : Dict = self.get_tokenizer()
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''from_pretrained kwargs (special tokens, image-processor overrides) take effect.'''
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : Union[str, Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )

        __lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        __lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Processing an image through the processor matches the bare image processor.'''
        __lowerCAmelCase : Any = self.get_image_processor()
        __lowerCAmelCase : Optional[Any] = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
        __lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
        __lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase__ ( self ) ->str:
        '''Processing text through the processor matches the bare tokenizer.'''
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : Any = '''test'''
        __lowerCAmelCase : Dict = processor(text=A_ )
        __lowerCAmelCase : str = tokenizer(A_ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Text+image call yields pixel_values and labels; an empty call raises.'''
        __lowerCAmelCase : Dict = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : List[Any] = '''test'''
        __lowerCAmelCase : int = self.prepare_image_inputs()
        __lowerCAmelCase : int = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )

        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''processor.char_decode strips the spaces batch_decode inserts.'''
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : int = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase : Optional[int] = processor.char_decode(A_ )
        __lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ )
        __lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]

        self.assertListEqual(A_ , A_ )

    def UpperCamelCase__ ( self ) ->Any:
        '''With no text, the output keys match processor.model_input_names.'''
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
        __lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''batch_decode on (char, bpe, wp) logits returns the documented result keys.'''
        __lowerCAmelCase : List[str] = self.get_image_processor()
        __lowerCAmelCase : List[str] = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        __lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
        __lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 )
        __lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 )
        __lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )

        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
275
1
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_UpperCamelCase = logging.get_logger(__name__)


class __lowercase (YolosImageProcessor ):
    """Deprecated alias of ``YolosImageProcessor``, kept for backward compatibility."""

    def __init__( self , *args , **kwargs ) ->None:
        '''Emit a deprecation warning, then forward construction to the parent.

        Fixes three mangling-introduced defects: ``*A_ , **A_`` duplicated the
        parameter name (a SyntaxError), the base class referenced the undefined
        ``_UpperCAmelCase`` instead of the imported ``YolosImageProcessor``, and
        the warning category was the args tuple rather than ``FutureWarning``.
        '''
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
275
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class __lowercase (unittest.TestCase ):
    @property
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Build a tiny deterministic UNet2D for fast pipeline tests.'''
        # NOTE(review): the mangled assignment target no longer matches the
        # ``model`` name returned below — obfuscation artifact, left as-is.
        torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def UpperCamelCase__ ( self ) ->int:
        '''PNDM fast test: dict vs tuple return paths must produce identical images.'''
        __lowerCAmelCase : List[str] = self.dummy_uncond_unet
        __lowerCAmelCase : Any = PNDMScheduler()
        __lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        # same seed for both invocations so the outputs are comparable
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0]
        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # golden bottom-right corner slice of the last channel
        __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __lowercase (unittest.TestCase ):
    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Slow integration test against the pretrained google/ddpm-cifar10-32 UNet.'''
        __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ )
        __lowerCAmelCase : int = PNDMScheduler()
        __lowerCAmelCase : Any = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Tuple = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # golden slice recorded from a known-good run
        __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
275
1
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
_UpperCamelCase = logging.getLogger()


def _lowercase ( lowercase__ , lowercase__ ):
    # NOTE(review): parameter names were mangled; this writes the joined article
    # lines to the given path (intended signature: (path, articles)).
    __lowerCAmelCase : Optional[int] = '''\n'''.join(lowercase__ )
    Path(lowercase__ ).open('''w''' ).writelines(lowercase__ )


_UpperCamelCase = "patrickvonplaten/t5-tiny-random"
_UpperCamelCase = "sshleifer/bart-tiny-random"
_UpperCamelCase = "sshleifer/tiny-mbart"

_UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class __lowercase (_UpperCAmelCase ):
    def UpperCamelCase__ ( self , A_ ) ->List[str]:
        '''Run run_eval's ``run_generate`` end-to-end for the given tiny model.'''
        __lowerCAmelCase : Optional[int] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        __lowerCAmelCase : Any = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        __lowerCAmelCase : int = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
        _dump_articles(A_ , A_ )
        __lowerCAmelCase : Optional[int] = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
        # t5 is exercised as a translation task, the others as summarization
        __lowerCAmelCase : Optional[int] = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        __lowerCAmelCase : Union[str, Any] = f""" run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 """.split()
        with patch.object(A_ , '''argv''' , A_ ):
            run_generate()
            assert Path(A_ ).exists()
            # os.remove(Path(output_file_name))

    def UpperCamelCase__ ( self ) ->str:
        '''Fast smoke test of the eval runner with the tiny t5 model.'''
        self.run_eval_tester(A_ )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def UpperCamelCase__ ( self , A_ ) ->Optional[Any]:
        '''Slow variant of the eval smoke test across additional tiny models.'''
        self.run_eval_tester(A_ )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def UpperCamelCase__ ( self , A_ ) ->int:
        '''Exercise run_eval_search's hyper-parameter grid search and its report.'''
        __lowerCAmelCase : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
        __lowerCAmelCase : Optional[int] = input_file_name.parent / '''utest_output.txt'''
        assert not output_file_name.exists()
        __lowerCAmelCase : Tuple = {
            '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
            '''de''': [
                '''Maschinelles Lernen ist großartig, oder?''',
                '''Ich esse gerne Bananen''',
                '''Morgen ist wieder ein toller Tag!''',
            ],
        }
        __lowerCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() )
        __lowerCAmelCase : int = str(tmp_dir / '''scores.json''' )
        __lowerCAmelCase : Dict = str(tmp_dir / '''val.target''' )
        _dump_articles(A_ , text['''en'''] )
        _dump_articles(A_ , text['''de'''] )
        __lowerCAmelCase : Any = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
        __lowerCAmelCase : Optional[int] = f""" run_eval_search.py {model} {str(A_ )} {str(A_ )} --score_path {score_path} --reference_path {reference_path} --task {task} """.split()
        testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
        with patch.object(A_ , '''argv''' , A_ ):
            with CaptureStdout() as cs:
                run_search()
            # search report must mention the swept params, the model, and metrics
            __lowerCAmelCase : Optional[int] = [''' num_beams | length_penalty''', model, '''Best score args''']
            __lowerCAmelCase : List[Any] = ['''Info''']
            if "translation" in task:
                expected_strings.append('''bleu''' )
            else:
                expected_strings.extend(A_ )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(A_ ).exists()
            os.remove(Path(A_ ) )
275
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _UpperCamelCase = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _UpperCamelCase = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _UpperCamelCase = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = len([g for position, g in enumerate(lowercase__ ) if g == main_target[position]] ) return (item, float(lowercase__ )) def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : str = random.randint(0 , len(lowercase__ ) - 1 ) __lowerCAmelCase : int = parent_a[:random_slice] + parent_a[random_slice:] __lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowercase ( lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = list(lowercase__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __lowerCAmelCase : int = random.choice(lowercase__ ) return "".join(lowercase__ ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , ): __lowerCAmelCase : str = [] # Generate more children proportionally to the fitness score. __lowerCAmelCase : str = int(parent_a[1] * 1_0_0 ) + 1 __lowerCAmelCase : Optional[Any] = 1_0 if child_n >= 1_0 else child_n for _ in range(lowercase__ ): __lowerCAmelCase : List[Any] = population_score[random.randint(0 , lowercase__ )][0] __lowerCAmelCase, __lowerCAmelCase : Dict = crossover(parent_a[0] , lowercase__ ) # Append new string to the population list. 
pop.append(mutate(lowercase__ , lowercase__ ) ) pop.append(mutate(lowercase__ , lowercase__ ) ) return pop def _lowercase ( lowercase__ , lowercase__ , lowercase__ = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __lowerCAmelCase : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(lowercase__ ) # Verify that the target contains no genes besides the ones inside genes variable. __lowerCAmelCase : Any = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __lowerCAmelCase : List[str] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(lowercase__ ) # Generate random starting population. __lowerCAmelCase : List[Any] = [] for _ in range(lowercase__ ): population.append(''''''.join([random.choice(lowercase__ ) for i in range(len(lowercase__ ) )] ) ) # Just some logs to know what the algorithms is doing. __lowerCAmelCase, __lowerCAmelCase : Tuple = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(lowercase__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __lowerCAmelCase : Any = [evaluate(lowercase__ , lowercase__ ) for item in population] # Check if there is a matching evolution. 
__lowerCAmelCase : Union[str, Any] = sorted(lowercase__ , key=lambda lowercase__ : x[1] , reverse=lowercase__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 1_0 == 0: print( f"""\nGeneration: {generation}""" f"""\nTotal Population:{total_population}""" f"""\nBest score: {population_score[0][1]}""" f"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3 )] population.clear() population.extend(lowercase__ ) # Normalize population score to be between 0 and 1. __lowerCAmelCase : List[Any] = [ (item, score / len(lowercase__ )) for item, score in population_score ] # This is selection for i in range(lowercase__ ): population.extend(select(population_score[int(lowercase__ )] , lowercase__ , lowercase__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(lowercase__ ) > N_POPULATION: break if __name__ == "__main__": _UpperCamelCase = ( "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!" ) _UpperCamelCase = list( " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = basic(target_str, genes_list) print( F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" )
275
1
from __future__ import annotations def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): if len(lowercase__ ) == 0: raise ValueError('''find_max() arg is an empty sequence''' ) if ( left >= len(lowercase__ ) or left < -len(lowercase__ ) or right >= len(lowercase__ ) or right < -len(lowercase__ ) ): raise IndexError('''list index out of range''' ) if left == right: return nums[left] __lowerCAmelCase : Optional[int] = (left + right) >> 1 # the middle __lowerCAmelCase : Optional[int] = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid] __lowerCAmelCase : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
275
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "spiece.model"} _UpperCamelCase = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } _UpperCamelCase = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase : int = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __lowerCAmelCase : Union[str, Any] = '''None''' # Default definitions 
for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token __lowerCAmelCase : int = eos_token if bos_token is None else bos_token else: __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __lowerCAmelCase : Union[str, Any] = do_lower_case __lowerCAmelCase : Union[str, Any] = remove_space __lowerCAmelCase : int = keep_accents __lowerCAmelCase : Union[str, Any] = vocab_file __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCAmelCase : int = re.compile( f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.__dict__.copy() __lowerCAmelCase : List[Any] = None return state def __setstate__( self , A_ ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase : List[Any] = {} __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ ) # Normalize whitespaces __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ ) return text def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' return self.sp_model.PieceToId(A_ ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def UpperCamelCase__ ( A_ ) ->str: '''simple docstring''' return out_string def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : str = [] __lowerCAmelCase : Tuple = '''''' __lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in 
self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(A_ ) __lowerCAmelCase : str = False out_string += self.sp_model.decode(A_ ) return out_string def UpperCamelCase__ ( self ) ->Dict[str, int]: '''simple docstring''' __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(A_ , A_ ): __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ ) __lowerCAmelCase : Dict = self.sp_model.encode(A_ ) else: __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text] __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": __lowerCAmelCase : Tuple = torch.tensor(A_ ) return token_ids def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.decode(A_ ) def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple 
docstring''' __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCAmelCase : Any = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
275
1
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
#
# NOTE(review): identifiers in this file were machine-mangled (every function is
# ``_lowercase``, every constant ``_UpperCamelCase``, every parameter
# ``lowercase__``), so many names referenced in the bodies no longer resolve.
# Code tokens are left untouched here; only documentation was added.

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

# json indent level used when dumping the generated config/vocab files
_UpperCamelCase = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_UpperCamelCase = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
_UpperCamelCase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _UpperCamelCase = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    _UpperCamelCase = "allenai"


def _lowercase ( lowercase__ ):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    __lowerCAmelCase : Union[str, Any] = dict((re.sub(r'''@@$''' , '''''' , lowercase__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , lowercase__ ), v) for k, v in d.items() )
    __lowerCAmelCase : Tuple = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        __lowerCAmelCase : int = d[k]  # restore
    return da


def _lowercase ( lowercase__ , lowercase__ ):
    # Converts one fairseq FSMT checkpoint into a transformers dump folder:
    # vocabs, merges, model config, tokenizer config, and renamed weights.
    # prep
    assert os.path.exists(lowercase__ )
    os.makedirs(lowercase__ , exist_ok=lowercase__ )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )

    # handle various types of models
    __lowerCAmelCase : Optional[Any] = basename(lowercase__ )
    __lowerCAmelCase : Union[str, Any] = dirname(lowercase__ )

    __lowerCAmelCase : Tuple = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    __lowerCAmelCase : int = cls.hub_models()
    __lowerCAmelCase : str = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
    __lowerCAmelCase : Union[str, Any] = '''.'''
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"""using checkpoint {checkpoint_file}""" )
    __lowerCAmelCase : Union[str, Any] = hub_utils.from_pretrained(
        lowercase__ , lowercase__ , lowercase__ , archive_map=lowercase__ , **lowercase__ )

    __lowerCAmelCase : Dict = vars(chkpt['''args''']['''model'''] )

    __lowerCAmelCase : Union[str, Any] = args['''source_lang''']
    __lowerCAmelCase : List[Any] = args['''target_lang''']

    __lowerCAmelCase : int = dirname(lowercase__ )
    __lowerCAmelCase : int = basename(lowercase__ )

    # dicts
    __lowerCAmelCase : Optional[Any] = os.path.join(lowercase__ , f"""dict.{src_lang}.txt""" )
    __lowerCAmelCase : Optional[Any] = os.path.join(lowercase__ , f"""dict.{tgt_lang}.txt""" )

    __lowerCAmelCase : Union[str, Any] = Dictionary.load(lowercase__ )
    __lowerCAmelCase : Optional[int] = rewrite_dict_keys(src_dict.indices )
    __lowerCAmelCase : str = len(lowercase__ )
    __lowerCAmelCase : Optional[Any] = os.path.join(lowercase__ , '''vocab-src.json''' )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    __lowerCAmelCase : List[Any] = True
    for k in src_vocab.keys():
        if not k.islower():
            __lowerCAmelCase : Union[str, Any] = False
            break

    __lowerCAmelCase : int = Dictionary.load(lowercase__ )
    __lowerCAmelCase : Optional[Any] = rewrite_dict_keys(tgt_dict.indices )
    __lowerCAmelCase : Tuple = len(lowercase__ )
    __lowerCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''vocab-tgt.json''' )
    print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )

    # merges_file (bpecodes)
    __lowerCAmelCase : Optional[int] = os.path.join(lowercase__ , VOCAB_FILES_NAMES['''merges_file'''] )
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        __lowerCAmelCase : Dict = os.path.join(lowercase__ , lowercase__ )
        if os.path.exists(lowercase__ ):
            break
    with open(lowercase__ , encoding='''utf-8''' ) as fin:
        __lowerCAmelCase : Dict = fin.read()
    __lowerCAmelCase : Tuple = re.sub(r''' \d+$''' , '''''' , lowercase__ , 0 , re.M )  # remove frequency number
    print(f"""Generating {merges_file}""" )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as fout:
        fout.write(lowercase__ )

    # model config
    __lowerCAmelCase : str = os.path.join(lowercase__ , '''config.json''' )

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}"""
    assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args["tokenizer"]}"""

    __lowerCAmelCase : Dict = {
        '''architectures''': ['''FSMTForConditionalGeneration'''],
        '''model_type''': '''fsmt''',
        '''activation_dropout''': args['''activation_dropout'''],
        '''activation_function''': '''relu''',
        '''attention_dropout''': args['''attention_dropout'''],
        '''d_model''': args['''decoder_embed_dim'''],
        '''dropout''': args['''dropout'''],
        '''init_std''': 0.0_2,
        '''max_position_embeddings''': args['''max_source_positions'''],
        '''num_hidden_layers''': args['''encoder_layers'''],
        '''src_vocab_size''': src_vocab_size,
        '''tgt_vocab_size''': tgt_vocab_size,
        '''langs''': [src_lang, tgt_lang],
        '''encoder_attention_heads''': args['''encoder_attention_heads'''],
        '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
        '''encoder_layerdrop''': args['''encoder_layerdrop'''],
        '''encoder_layers''': args['''encoder_layers'''],
        '''decoder_attention_heads''': args['''decoder_attention_heads'''],
        '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
        '''decoder_layerdrop''': args['''decoder_layerdrop'''],
        '''decoder_layers''': args['''decoder_layers'''],
        '''bos_token_id''': 0,
        '''pad_token_id''': 1,
        '''eos_token_id''': 2,
        '''is_encoder_decoder''': True,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_all_embeddings'''],
    }

    # good hparam defaults to start with
    __lowerCAmelCase : int = 5
    __lowerCAmelCase : Optional[int] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        __lowerCAmelCase : Optional[int] = best_score_hparams[model_dir]['''length_penalty''']
    else:
        __lowerCAmelCase : Optional[Any] = 1.0

    print(f"""Generating {fsmt_model_config_file}""" )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )

    # tokenizer config
    __lowerCAmelCase : Tuple = os.path.join(lowercase__ , lowercase__ )

    __lowerCAmelCase : int = {
        '''langs''': [src_lang, tgt_lang],
        '''model_max_length''': 1_0_2_4,
        '''do_lower_case''': do_lower_case,
    }

    print(f"""Generating {fsmt_tokenizer_config_file}""" )
    with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )

    # model
    __lowerCAmelCase : List[Any] = chkpt['''models'''][0]
    __lowerCAmelCase : List[Any] = model.state_dict()

    # rename keys to start with 'model.'
    __lowerCAmelCase : Any = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )

    # remove unneeded keys
    __lowerCAmelCase : List[str] = [
        '''model.model''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''model.encoder_embed_tokens.weight''',
        '''model.decoder_embed_tokens.weight''',
        '''model.encoder.embed_positions._float_tensor''',
        '''model.decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(lowercase__ , lowercase__ )

    __lowerCAmelCase : Optional[Any] = FSMTConfig.from_pretrained(lowercase__ )
    __lowerCAmelCase : Any = FSMTForConditionalGeneration(lowercase__ )

    # check that it loads ok
    model_new.load_state_dict(lowercase__ , strict=lowercase__ )

    # save
    __lowerCAmelCase : Dict = os.path.join(lowercase__ , lowercase__ )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(lowercase__ , lowercase__ )

    print('''Conversion is done!''' )
    print('''\nLast step is to upload the files to s3''' )
    print(f"""cd {data_root}""" )
    print(f"""transformers-cli upload {model_dir}""" )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    _UpperCamelCase = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
275
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """table-transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , A_=True , A_=None , A_=3 , A_=100 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=2048 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , **A_ , ) ->Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) __lowerCAmelCase : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A_ , A_ ): __lowerCAmelCase : int = backbone_config.get('''model_type''' ) __lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase : Any = config_class.from_dict(A_ ) # set timm attributes to None __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : List[str] = None, None, None __lowerCAmelCase : Tuple = use_timm_backbone __lowerCAmelCase : Optional[Any] = backbone_config __lowerCAmelCase : List[str] = num_channels __lowerCAmelCase : Tuple = num_queries __lowerCAmelCase : int = d_model __lowerCAmelCase : List[Any] = encoder_ffn_dim __lowerCAmelCase : Optional[int] = encoder_layers __lowerCAmelCase : List[str] = encoder_attention_heads __lowerCAmelCase : str = decoder_ffn_dim __lowerCAmelCase : Union[str, Any] = decoder_layers __lowerCAmelCase : Any = decoder_attention_heads __lowerCAmelCase : Optional[int] = dropout __lowerCAmelCase : Any = attention_dropout __lowerCAmelCase : Tuple = activation_dropout __lowerCAmelCase : Optional[Any] = activation_function __lowerCAmelCase : List[str] = init_std __lowerCAmelCase : Tuple = init_xavier_std __lowerCAmelCase : Any = encoder_layerdrop __lowerCAmelCase : List[Any] = decoder_layerdrop __lowerCAmelCase : Optional[Any] = encoder_layers __lowerCAmelCase : Optional[Any] = auxiliary_loss __lowerCAmelCase : Optional[Any] = position_embedding_type __lowerCAmelCase : Tuple = backbone __lowerCAmelCase : Any = use_pretrained_backbone __lowerCAmelCase : int = dilation # Hungarian matcher __lowerCAmelCase : Dict = class_cost __lowerCAmelCase : List[str] = bbox_cost __lowerCAmelCase : int = giou_cost # Loss coefficients __lowerCAmelCase : Optional[Any] = mask_loss_coefficient __lowerCAmelCase : Tuple = dice_loss_coefficient __lowerCAmelCase : int = bbox_loss_coefficient __lowerCAmelCase : List[Any] = giou_loss_coefficient 
__lowerCAmelCase : int = eos_coefficient super().__init__(is_encoder_decoder=A_ , **A_ ) @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.d_model class __lowercase (_UpperCAmelCase ): _UpperCamelCase = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def UpperCamelCase__ ( self ) ->float: '''simple docstring''' return 1e-5 @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return 12
275
1
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {} _UpperCamelCase = {} _UpperCamelCase = {} def _lowercase ( lowercase__ , lowercase__ , lowercase__ = None , ): __lowerCAmelCase : List[str] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" ) __lowerCAmelCase : Tuple = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" ) __lowerCAmelCase : Tuple = format_type def _lowercase ( lowercase__ , lowercase__ , lowercase__ = None ): __lowerCAmelCase : Optional[int] = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): __lowerCAmelCase : Any = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: _UpperCamelCase = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: 
from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: _UpperCamelCase = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: _UpperCamelCase = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def _lowercase ( lowercase__ ): if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _lowercase ( lowercase__ , **lowercase__ ): __lowerCAmelCase : str = get_format_type_from_alias(lowercase__ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowercase__ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
275
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Any = global_rng __lowerCAmelCase : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : str = min_seq_length __lowerCAmelCase : int = max_seq_length __lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Any = padding_value __lowerCAmelCase : str = sampling_rate __lowerCAmelCase : Optional[Any] = return_attention_mask __lowerCAmelCase : Optional[Any] = do_normalize __lowerCAmelCase : Optional[Any] = feature_size __lowerCAmelCase : Optional[int] = chunk_length __lowerCAmelCase : Optional[Any] = hop_length def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, 
"do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0] check_json_file_has_correct_format(A_ ) __lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ ) __lowerCAmelCase : Dict = feat_extract_first.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters __lowerCAmelCase : Dict = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A_ ) __lowerCAmelCase : List[str] = 
self.feature_extraction_class.from_json_file(A_ ) __lowerCAmelCase : List[str] = feat_extract_first.to_dict() __lowerCAmelCase : Tuple = feat_extract_second.to_dict() __lowerCAmelCase : Any = feat_extract_first.mel_filters __lowerCAmelCase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : Optional[int] = np.asarray(A_ ) __lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] __lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated] __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' import torch __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) __lowerCAmelCase : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , 
split='''validation''' ) # automatic decoding with librispeech __lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on __lowerCAmelCase : int = self._load_datasamples(1 ) __lowerCAmelCase : Any = WhisperFeatureExtractor() __lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = self._load_datasamples(1 )[0] __lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue __lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0] self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
275
1
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class __lowercase (_UpperCAmelCase ): def __init__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Any = [] def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->str: '''simple docstring''' self.events.append('''on_init_end''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Any: '''simple docstring''' self.events.append('''on_train_begin''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Any: '''simple docstring''' self.events.append('''on_train_end''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' self.events.append('''on_epoch_begin''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Tuple: '''simple docstring''' self.events.append('''on_epoch_end''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' self.events.append('''on_step_begin''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Tuple: '''simple docstring''' self.events.append('''on_step_end''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Any: '''simple docstring''' self.events.append('''on_evaluate''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' self.events.append('''on_predict''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Optional[int]: '''simple docstring''' self.events.append('''on_save''' ) def UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Union[str, Any]: '''simple docstring''' self.events.append('''on_log''' ) def 
UpperCamelCase__ ( self , A_ , A_ , A_ , **A_ ) ->Optional[int]: '''simple docstring''' self.events.append('''on_prediction_step''' ) @require_torch class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : List[str] = tempfile.mkdtemp() def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' shutil.rmtree(self.output_dir ) def UpperCamelCase__ ( self , A_=0 , A_=0 , A_=64 , A_=64 , A_=None , A_=False , **A_ ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = RegressionDataset(length=A_ ) __lowerCAmelCase : Dict = RegressionDataset(length=A_ ) __lowerCAmelCase : Tuple = RegressionModelConfig(a=A_ , b=A_ ) __lowerCAmelCase : List[str] = RegressionPreTrainedModel(A_ ) __lowerCAmelCase : str = TrainingArguments(self.output_dir , disable_tqdm=A_ , report_to=[] , **A_ ) return Trainer( A_ , A_ , train_dataset=A_ , eval_dataset=A_ , callbacks=A_ , ) def UpperCamelCase__ ( self , A_ , A_ ) ->Dict: '''simple docstring''' self.assertEqual(len(A_ ) , len(A_ ) ) # Order doesn't matter __lowerCAmelCase : Optional[int] = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ ) __lowerCAmelCase : Any = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ ) for cba, cba in zip(A_ , A_ ): if isinstance(A_ , A_ ) and isinstance(A_ , A_ ): self.assertEqual(A_ , A_ ) elif isinstance(A_ , A_ ) and not isinstance(A_ , A_ ): self.assertEqual(A_ , cba.__class__ ) elif not isinstance(A_ , A_ ) and isinstance(A_ , A_ ): self.assertEqual(cba.__class__ , A_ ) else: self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' __lowerCAmelCase : str = ['''on_init_end''', '''on_train_begin'''] __lowerCAmelCase : str = 0 __lowerCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() ) __lowerCAmelCase : List[str] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + 
['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(A_ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Any = self.get_trainer() __lowerCAmelCase : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) # Callbacks passed at init are added to the default callbacks __lowerCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowerCAmelCase : Dict = self.get_trainer(disable_tqdm=A_ ) __lowerCAmelCase : Any = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowerCAmelCase : Union[str, Any] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A_ ) expected_callbacks.remove(A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) __lowerCAmelCase : Tuple = self.get_trainer() __lowerCAmelCase : Union[str, Any] = trainer.pop_callback(A_ ) 
self.assertEqual(cb.__class__ , A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) trainer.add_callback(A_ ) expected_callbacks.insert(0 , A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) # We can also add, pop, or remove by instance __lowerCAmelCase : Any = self.get_trainer() __lowerCAmelCase : Optional[Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(A_ ) expected_callbacks.remove(A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) __lowerCAmelCase : List[str] = self.get_trainer() __lowerCAmelCase : List[str] = trainer.callback_handler.callbacks[0] __lowerCAmelCase : Union[str, Any] = trainer.pop_callback(A_ ) self.assertEqual(A_ , A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) trainer.add_callback(A_ ) expected_callbacks.insert(0 , A_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' , category=A_ ) __lowerCAmelCase : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowerCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) # Independent log/save/eval __lowerCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() __lowerCAmelCase : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) __lowerCAmelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() __lowerCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) __lowerCAmelCase : Union[str, Any] = 
self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' ) trainer.train() __lowerCAmelCase : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) __lowerCAmelCase : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' ) trainer.train() __lowerCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) # A bit of everything __lowerCAmelCase : List[Any] = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , ) trainer.train() __lowerCAmelCase : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A_ , self.get_expected_events(A_ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowerCAmelCase : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A_ ) in warn_mock.call_args[0][0]
275
"""Convert a fairseq Wav2Vec2 checkpoint (pretrained, CTC-finetuned, or
sequence-classification-finetuned) to the HuggingFace Transformers format.

The original block was machine-obfuscated: every function was named
``_lowercase`` (each definition shadowing the previous one), parameters were
all ``lowercase__`` (a SyntaxError), and the module constants the code refers
to (``logger``, ``MAPPING``, ``TOP_LEVEL_KEYS``, ``PARAM_MAPPING``) were all
bound to ``_UpperCamelCase``.  Coherent names are restored here so the script
can actually run; the CLI interface is unchanged.
"""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF parameter name ("*" is a layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF keys that live at the top level of the model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    """Read a text file with one label per line into a {line_number: label} dict.

    Blank lines are skipped; only the first whitespace-separated token of each
    non-empty line is kept.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(key, value, full_name, weight_type, hf_pointer):
    """Copy `value` into the HF module reached by walking `key` from `hf_pointer`.

    `weight_type` selects which attribute (weight/weight_g/weight_v/bias/param)
    receives the tensor.  Raises ValueError on a shape mismatch.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Store `value` in `hf_dict` under the fully-qualified HF key for `key`.

    Mirrors `set_recursively` but targets a plain state-dict instead of a model.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head keeps the full tensor; adapter params drop the leading dimension.
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


# fairseq adapter parameter names -> HF adapter parameter names.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """Route one fairseq tensor to the matching HF parameter.

    Returns True when the tensor was recognized (and loaded), False otherwise.
    Exactly one of `hf_model` / `hf_dict` should be provided.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor from the fairseq state dict into `hf_model`.

    `is_headless` is kept for call-site compatibility; this variant always
    reads the feature extractor from `hf_model.wav2vec2`.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, validating its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq checkpoint's weights into the HF design and save it."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
275
1
"""Convert a fairseq Wav2Vec2 encoder + Speech2Text2 decoder seq2seq checkpoint
to a HuggingFace ``SpeechEncoderDecoderModel``.

The original block was machine-obfuscated: every function was named
``_lowercase`` (each shadowing the previous), parameters were all
``lowercase__`` (duplicate-argument SyntaxError), and the constants the code
refers to (``logger``, ``MAPPING``) were bound to ``_UpperCamelCase``.
Coherent names are restored here; the CLI interface is unchanged.
"""

import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF parameter name ("*" is a layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# HF keys that live at the top level of the model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module reached by walking `key` from `hf_pointer`.

    Asserts that the destination shape matches before assigning.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Copy every encoder tensor into `hf_model` and return the fairseq
    encoder-to-decoder projection module (or None when absent)."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, asserting its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # NOTE: the original error message indexed `feature_extractor[layer_id]`
            # (a TypeError at message-build time); fixed to `.conv_layers[layer_id]`.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding matrix's weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build a token->id vocabulary from a fairseq dict file.

    The four special tokens occupy ids 0..3; dict entries start at id 4.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the seq2seq checkpoint's weights into the HF design and save it."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
275
from ....configuration_utils import PretrainedConfig from ....utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """trajectory_transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int: '''simple docstring''' __lowerCAmelCase : Any = vocab_size __lowerCAmelCase : Tuple = action_weight __lowerCAmelCase : Tuple = reward_weight __lowerCAmelCase : Union[str, Any] = value_weight __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : str = block_size __lowerCAmelCase : Optional[Any] = action_dim __lowerCAmelCase : Union[str, Any] = observation_dim __lowerCAmelCase : Union[str, Any] = transition_dim __lowerCAmelCase : Dict = learning_rate __lowerCAmelCase : Any = n_layer __lowerCAmelCase : Any = n_head __lowerCAmelCase : Optional[int] = n_embd __lowerCAmelCase : str = embd_pdrop __lowerCAmelCase : Dict = attn_pdrop __lowerCAmelCase : Optional[int] = resid_pdrop __lowerCAmelCase : Union[str, Any] = initializer_range __lowerCAmelCase : Optional[int] = layer_norm_eps __lowerCAmelCase : Any = kaiming_initializer_range __lowerCAmelCase : List[str] = use_cache super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
275
1
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): identifiers in this file are machine-obfuscated — every test
# method is named `UpperCamelCase__` (so only the last definition per decorator
# set survives), and references such as `A_`, `tokenizer`, `vocab_keys` do not
# resolve as written.  Code tokens are left byte-identical; comments only.
_UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
    # tokenizer classes / flags consumed by the tokenizer-tester mixin
    _UpperCamelCase = XGLMTokenizer
    _UpperCamelCase = XGLMTokenizerFast
    _UpperCamelCase = True
    _UpperCamelCase = True

    def UpperCamelCase__ ( self ) ->List[str]:
        """Write the SentencePiece fixture tokenizer into the mixin's temp dir (setUp)."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __lowerCAmelCase : Any = XGLMTokenizer(A_ , keep_accents=A_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """`<pad>` should round-trip to id 1 and back through the tokenizer."""
        __lowerCAmelCase : Any = '''<pad>'''
        __lowerCAmelCase : Dict = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )

    def UpperCamelCase__ ( self ) ->List[Any]:
        """The fixture vocab starts with <s>/<pad> and holds 1008 entries."""
        __lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(len(A_ ) , 1008 )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """vocab_size of the fixture tokenizer is 1008."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )

    def UpperCamelCase__ ( self ) ->List[str]:
        """Full tokenization round-trip: tokenize, ids, and back to tokens."""
        __lowerCAmelCase : Any = XGLMTokenizer(A_ , keep_accents=A_ )

        __lowerCAmelCase : Dict = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(A_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        __lowerCAmelCase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            A_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        __lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(A_ )
        self.assertListEqual(
            A_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )

        # out-of-vocab pieces ('9', 'é') come back as <unk>
        __lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(A_ )
        self.assertListEqual(
            A_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    @cached_property
    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """Lazily load the real facebook/xglm-564M tokenizer for the slow tests."""
        return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )

    def UpperCamelCase__ ( self ) ->List[str]:
        """A tokenizer built from a NamedTemporaryFile must survive pickling."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(A_ , f.name )
            __lowerCAmelCase : List[Any] = XGLMTokenizer(f.name , keep_accents=A_ )
            __lowerCAmelCase : Union[str, Any] = pickle.dumps(A_ )
        pickle.loads(A_ )

    def UpperCamelCase__ ( self ) ->List[Any]:
        """Slow (Python) and fast (Rust) tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        __lowerCAmelCase : Optional[int] = self.get_tokenizer()
        __lowerCAmelCase : List[Any] = self.get_rust_tokenizer()

        __lowerCAmelCase : List[Any] = '''I was born in 92000, and this is falsé.'''

        __lowerCAmelCase : str = tokenizer.tokenize(A_ )
        __lowerCAmelCase : Optional[int] = rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )

        __lowerCAmelCase : Optional[Any] = tokenizer.encode(A_ , add_special_tokens=A_ )
        __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )

        __lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
        __lowerCAmelCase : str = tokenizer.encode(A_ )
        __lowerCAmelCase : List[Any] = rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )

    @slow
    def UpperCamelCase__ ( self ) ->List[str]:
        """Easy-symbols encoding against the real checkpoint."""
        __lowerCAmelCase : Optional[int] = '''Hello World!'''
        __lowerCAmelCase : Optional[int] = [2, 3_1227, 4447, 35]

        self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        """Hard-symbols encoding (punctuation + unknown words) against the real checkpoint."""
        __lowerCAmelCase : List[str] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        __lowerCAmelCase : Optional[int] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )

    @slow
    def UpperCamelCase__ ( self ) ->List[Any]:
        """Full integration check of input_ids + attention_mask for three sequences."""
        # fmt: off
        __lowerCAmelCase : List[Any] = {
            '''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
            '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=A_ , model_name='''facebook/xglm-564M''' , padding=A_ , )
275
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(0, 0)
    '0b0'

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # A result bit is 1 whenever either operand has a 1 in that position.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


# Backward-compatible alias for the file's previous (obfuscated) public name.
_lowercase = binary_or

if __name__ == "__main__":
    import doctest

    doctest.testmod()
275
1
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: 6 * a**2."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: 2 * (lb + bh + lh)."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4 * pi * r**2."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved face + flat disc): 3 * pi * r**2."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a cone: pi * r * (r + slant_height)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum (two circular faces + lateral surface)."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder: 2 * pi * r * (h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a ring torus: 4 * pi**2 * R * r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides, via Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium: (b1 + b2) * h / 2."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse from its two semi-axes."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals: d1 * d2 / 2."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with `sides` sides of the given edge length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
    # BUG FIX: the original duplicated this return statement (second copy unreachable).
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
275
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) to resize `input_image` to.

    Optionally keeps the aspect ratio (scaling both axes by the scale closest to 1)
    and rounds each output dimension to a multiple of `multiple`.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple; fall back to floor/ceil so the result
        # stays within [min_val, max_val] when those bounds are given.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class __lowercase(BaseImageProcessor):
    """DPT-style image processor: resize (optionally aspect-preserving and
    multiple-constrained), rescale and normalize images to model inputs.

    NOTE(review): base class restored to `BaseImageProcessor` (the previous
    `_UpperCAmelCase` name was undefined in this module).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size`, optionally preserving aspect ratio and
        snapping the output dimensions to a multiple of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        # Module-level `resize` from image_transforms, not self.resize.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare a batch of images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE(review): precedence is `(do_resize and size is None) or resample is None`,
        # kept as in the original.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image semantic segmentation maps,
        optionally resizing each map to the corresponding target size."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                # align_corners=False assumed here (original value was corrupted) — TODO confirm.
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
275
1
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class __lowercase(unittest.TestCase):
    """Tests for get_activation().

    BUG FIXES: the four test methods previously shared one name (so only the
    last survived and none were discovered by unittest) — renamed to test_*;
    `torch.floataa` was not a real dtype — replaced with torch.float32.
    """

    def _check_activation(self, act, saturating_input):
        # Common contract: saturates to 0 for a very negative input, is nonzero
        # at -1, passes 0 through, and is identity-like for large positives.
        self.assertEqual(act(torch.tensor(saturating_input, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self._check_activation(act, -100)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self._check_activation(act, -100)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        # Mish decays to zero more slowly, so a more negative probe is used.
        self._check_activation(act, -200)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self._check_activation(act, -100)
275
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __lowercase(unittest.TestCase):
    """Unit tests for the Vector/Matrix library.

    BUG FIX: all methods previously shared one (non test_*) name, so only the
    last survived and unittest discovered none; locals were also undefined.
    """

    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # an empty vector must be constructible

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_matrix_change_component(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_matrix_component(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_matrix_add(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_matrix_sub(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class __lowercase(BaseImageProcessor):
    """PoolFormer-style image processor: crop-fraction-aware resize,
    center crop, rescale and normalize.

    NOTE(review): base class restored to `BaseImageProcessor` (the previous
    `_UpperCAmelCase` name was undefined in this module).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`; when `crop_pct` is given, resize to size/crop_pct so a
        subsequent center crop of `size` keeps roughly `crop_pct` of the image."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        # Module-level `resize` from image_transforms, not self.resize.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to (height, width) given by `size`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare a batch of images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE(review): precedence is `(do_resize and size is None) or resample is None`,
        # kept as in the original.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
275
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: sqrt(bulk_modulus / density), in SI units (m/s).

    Raises:
        ValueError: if density or bulk modulus is not strictly positive.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


# Backward-compatible alias for the file's previous (obfuscated) public name.
_lowercase = speed_of_sound_in_a_fluid

if __name__ == "__main__":
    import doctest

    doctest.testmod()
275
1
def is_palindrome(num: int) -> bool:
    """Return True if the base-10 digits of `num` read the same backwards.

    Negative numbers are never palindromes (the sign has no mirror digit).
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    # Peel digits off the right of `num` and push them onto `rev_num`.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


# Backward-compatible alias for the file's previous (obfuscated) public name.
_lowercase = is_palindrome

if __name__ == "__main__":
    import doctest

    doctest.testmod()
275
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (avoids the sqrt)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return a copy of `array` sorted by the given tuple index."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force minimum squared distance over all pairs (base case)."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the middle strip; only the 6 preceding
    points need to be checked per point for points sorted along the strip."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer minimum squared distance among `points_counts` points."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Only points within closest_pair_dis of the dividing line can beat it.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the smallest Euclidean distance between any two of `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
275
1
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    """Sum all parameter values in *state_dict*.

    encoder.embeddings are double copied in the original FLAVA checkpoint,
    so they are excluded to keep the totals comparable across layouts.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder keys to the FlavaImageCodebook layout."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        # group_N.*  ->  group_N.group.*
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        # res_path.*  ->  res_path.path.*
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        # trailing .w / .b  ->  .weight / .bias (last occurrence only)
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a DALL-E encoder checkpoint into a FlavaImageCodebook.

    *checkpoint_path* may be a local file or a URL. When *save_checkpoint*
    is True the converted model is written to *pytorch_dump_folder_path*;
    otherwise the upgraded state dict is returned.
    """
    from dall_e import Encoder  # local import: optional conversion-only dependency

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # The checkpoint may store either a module or a plain state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: total parameter mass must survive the key renaming.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
275
def solution(pence=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    """Count how many ways *pence* can be made from the given coin values.

    Bottom-up coin-change counting (Project Euler style, UK coin set by
    default). *coins* was generalized from a hard-coded list; the default
    preserves the original behaviour.

    >>> solution(200)
    73682
    """
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence (no coins)
    # Process one coin value at a time so each combination is counted
    # exactly once, regardless of coin order.
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
275
1
from typing import Any, Callable, Dict, List, Optional, Union

import torch

from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

# Checkpoints compared by this pipeline. These were previously four
# identically-named globals, so only the last one was reachable.
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Run one prompt through Stable Diffusion v1.1–v1.4 so the outputs of
    the four checkpoints can be compared side by side."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        # BUG FIX: originally called the non-existent ``super()._init_()``.
        super().__init__()
        # v1.1–v1.3 are downloaded as complete pipelines; v1.4 is assembled
        # from the components passed to this constructor.
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        """Public (non-underscore) config entries resolved on this object."""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation on the shared UNet."""
        if slice_size == "auto":
            # Half the attention head size is usually a good trade-off
            # between speed and memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable attention slicing (compute attention in one step)."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.1 checkpoint; all args forwarded verbatim."""
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.2 checkpoint; all args forwarded verbatim."""
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.3 checkpoint; all args forwarded verbatim."""
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate with the v1.4 checkpoint; all args forwarded verbatim."""
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Run all four checkpoints on the same prompt and bundle the first
        image of each into a single StableDiffusionPipelineOutput."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # The UNet requires spatial dims that are multiples of 8.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # All four sub-pipelines receive the identical argument set.
        common = dict(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )
        res1 = self.text2img_sd1_1(**common)
        res2 = self.text2img_sd1_2(**common)
        res3 = self.text2img_sd1_3(**common)
        res4 = self.text2img_sd1_4(**common)

        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
275
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        # Tiny unconditional UNet for fast CPU tests.
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        # Tiny class-conditional UNet for fast CPU tests.
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
275
1
def _lowercase ( lowercase__ ): __lowerCAmelCase : Optional[int] = 0 # if input_string is "aba" than new_input_string become "a|b|a" __lowerCAmelCase : Optional[Any] = '''''' __lowerCAmelCase : Any = '''''' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(lowercase__ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring __lowerCAmelCase, __lowerCAmelCase : Any = 0, 0 # length[i] shows the length of palindromic substring with center i __lowerCAmelCase : List[Any] = [1 for i in range(len(lowercase__ ) )] # for each character in new_string find corresponding palindromic string __lowerCAmelCase : Dict = 0 for j in range(len(lowercase__ ) ): __lowerCAmelCase : List[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(lowercase__ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 __lowerCAmelCase : Optional[int] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: __lowerCAmelCase : Any = j - k + 1 # noqa: E741 __lowerCAmelCase : List[Any] = j + k - 1 # update max_length and start position if max_length < length[j]: __lowerCAmelCase : Tuple = length[j] __lowerCAmelCase : Optional[int] = j # create that string __lowerCAmelCase : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
275
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """Hash table whose slots hold deques, so colliding keys chain their
    values (separate chaining) instead of overwriting each other."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the chain stored at slot *key*.

        An empty slot (None) is lazily replaced by an empty deque first.
        """
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining chain capacity per slot, scaled by charge_factor."""
        # NOTE(review): assumes every slot already holds a deque; a slot that
        # is still None would raise TypeError in len() — confirm with callers.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep chaining at *key* until its deque is full and no slot is
        empty; only then fall back to the parent's resolution strategy."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
275
1
def factorial(num):
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    """Return the sum of the decimal digits of *number*."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # drop the last digit
    return sum_of_digits


def solution(num=100):
    """Project Euler 20: sum of the digits of num!.

    >>> solution(100)
    648
    """
    nfact = factorial(num)
    return split_and_add(nfact)


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
275
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float-valued nested list with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive (increasing) sequence lengths.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # Double-precision input must be downcast to float32 by pad().
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import structure: submodule name -> list of public names it exports.
# NOTE(review): the scrambled original rebound a single variable for every
# optional block (each assignment overwrote the previous one) and then passed
# an undefined `_import_structure` to `_LazyModule`, which raised NameError at
# import time.  Restored to the standard extend-one-dict pattern.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module below
    # resolves attributes on demand.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    """Builds a tiny DeBERTa config plus random inputs and shape-checks each task head.

    NOTE(review): the scrambled original named every class ``__lowercase`` while
    ``setUp`` below referenced ``DebertaModelTester`` — a NameError.  Class and
    method names are restored to match the call sites that survived intact.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels sized by the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocab than the default tiny config.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # Loss must be a scalar tensor (empty size list).
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): attribute names restored from the standard transformers test
    # layout (the scrambled original collapsed them all to `_UpperCamelCase`);
    # the True/False values match the original order — confirm against upstream.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
275
1
def _lowercase ( lowercase__ = 2_0_0 ): __lowerCAmelCase : Union[str, Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0] __lowerCAmelCase : Dict = [0] * (pence + 1) __lowerCAmelCase : Optional[int] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowercase__ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 7_3682
275
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37) plus the
    masks and index maps between the atom14 and atom37 layouts.

    Mutates and returns `protein`.  NOTE(review): the scrambled original defined
    both functions in this file as `_lowercase` (the second shadowed the first
    and called the undefined `make_atomaa_masks`), and the per-key writes into
    `protein` were collapsed into throwaway locals; names/keys restored from the
    standard openfold data transform — confirm key spelling against callers.
    """
    restype_atom14_to_atom37 = []  # per restype: atom37 index for each of 14 atoms
    restype_atom37_to_atom14 = []  # per restype: atom14 index for each of 37 atoms
    restype_atom14_mask = []  # per restype: which of the 14 slots exist

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atom37_mask[protein_aatype]

    return protein


def make_atomaa_masks_np(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """Numpy front-end: tensorize every ndarray, run `make_atomaa_masks`, and
    convert the resulting tensors back to numpy arrays."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
275
1
def _lowercase ( lowercase__ , lowercase__ ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCAmelCase : int = str(bin(lowercase__ ) )[2:] # remove the leading "0b" __lowerCAmelCase : Any = str(bin(lowercase__ ) )[2:] __lowerCAmelCase : List[str] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
275
def _lowercase ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowerCAmelCase : int = sorted(string.lower() ) return len(lowercase__ ) == len(set(lowercase__ ) ) if __name__ == "__main__": _UpperCamelCase = input("Enter a string ").strip() _UpperCamelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
1
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


# NOTE(review): the scrambled original named every class in this file
# `__lowercase` (so only the last definition survived at module level), replaced
# dataclass fields with `= 42`, and collapsed `self.<attr> = ...` assignments
# into throwaway locals, which would make every `self.down_blocks` /
# `self.mid_block` access fail.  Class names, fields and attributes are restored
# from the standard diffusers VAE layout; `torch.nn.Convad` (not a real torch
# name) is restored to `nn.Conv2d`.


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step.

    Attributes:
        sample: decoded output tensor from the last layer of the model.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    """Downsampling half of a VAE: conv-in -> down blocks -> mid block -> conv-out."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        # double_z: emit mean and logvar channels for a Gaussian latent.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    """Upsampling half of a VAE: conv-in -> mid block -> up blocks -> conv-out."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # Spatial norm conditions on the latent embedding; plain group norm does not.
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Nearest-neighbour vector quantizer with optional index remapping."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices onto the reduced `used` vocabulary."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of `remap_to_used`: recover indices in the full vocabulary."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifies (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian parameterized by concatenated (mean, logvar) channels."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp for numerical stability of exp() below.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Zero variance: sample() degenerates to the mean.
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this Gaussian, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
275
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __lowercase (_UpperCAmelCase ): _UpperCamelCase = 42 _UpperCamelCase = 42 _UpperCamelCase = None class __lowercase (_UpperCAmelCase , _UpperCAmelCase ): _UpperCamelCase = 2 @register_to_config def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = sigma_max # setable values __lowerCAmelCase : int = None __lowerCAmelCase : np.IntTensor = None __lowerCAmelCase : torch.FloatTensor = None # sigma(t_i) def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor: '''simple docstring''' return sample def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]: '''simple docstring''' __lowerCAmelCase : str = num_inference_steps __lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy() __lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ ) __lowerCAmelCase : Tuple = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: __lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: __lowerCAmelCase : List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device ) __lowerCAmelCase : str = sigma + gamma * sigma __lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return 
sample_hat, sigma_hat def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output __lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat __lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]: '''simple docstring''' __lowerCAmelCase : str = sample_prev + sigma_prev * model_output __lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev __lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=A_ , derivative=A_ , pred_original_sample=A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any: '''simple docstring''' raise NotImplementedError()
275
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: _UpperCamelCase = None _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _UpperCamelCase = { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", }, "tokenizer_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json", }, } _UpperCamelCase = { "camembert-base": 512, } _UpperCamelCase = "▁" class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] _UpperCamelCase = CamembertTokenizer def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , **A_ , ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , **A_ , ) __lowerCAmelCase : List[str] = vocab_file __lowerCAmelCase : List[Any] = False if not self.vocab_file else True def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase : Any = [self.cls_token_id] __lowerCAmelCase : List[str] = [self.sep_token_id] return cls + 
token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[int]: '''simple docstring''' __lowerCAmelCase : List[str] = [self.sep_token_id] __lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Tuple = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
275
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): __lowerCAmelCase : Dict = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ ) else: __lowerCAmelCase : Optional[int] = np.full((len(lowercase__ ), sequence_length) , lowercase__ ) for i, tensor in enumerate(lowercase__ ): if padding_side == "right": if isinstance(lowercase__ , lowercase__ ): __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length] else: __lowerCAmelCase : int = tensor[:sequence_length] else: if isinstance(lowercase__ , lowercase__ ): __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length] else: __lowerCAmelCase : Optional[Any] = tensor[:sequence_length] return out_tensor.tolist() def _lowercase ( lowercase__ ): __lowerCAmelCase : Union[str, Any] = ord(lowercase__ ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True __lowerCAmelCase : int = unicodedata.category(lowercase__ ) if cat.startswith('''P''' ): return True return False @dataclass class __lowercase (_UpperCAmelCase ): _UpperCamelCase = 42 _UpperCamelCase = True _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = -100 _UpperCamelCase = "pt" def UpperCamelCase__ ( self , A_ ) ->Optional[int]: '''simple docstring''' import torch __lowerCAmelCase : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels''' __lowerCAmelCase : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __lowerCAmelCase : List[Any] = self.tokenizer.pad( A_ , padding=self.padding , max_length=self.max_length , 
pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __lowerCAmelCase : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1] __lowerCAmelCase : Optional[int] = self.tokenizer.padding_side if padding_side == "right": __lowerCAmelCase : Any = [ list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels ] else: __lowerCAmelCase : Optional[int] = [ [self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels ] __lowerCAmelCase : Tuple = [feature['''ner_tags'''] for feature in features] __lowerCAmelCase : List[Any] = padding_tensor(A_ , -1 , A_ , A_ ) __lowerCAmelCase : Optional[int] = [feature['''original_entity_spans'''] for feature in features] __lowerCAmelCase : Any = padding_tensor(A_ , (-1, -1) , A_ , A_ ) __lowerCAmelCase : Optional[Any] = {k: torch.tensor(A_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
275
1
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __lowercase : _UpperCamelCase = field( metadata={"""help""": """The output directory where the model will be written."""} , ) _UpperCamelCase = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } , ) _UpperCamelCase = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } , ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def _lowercase ( ): __lowerCAmelCase : List[Any] = HfArgumentParser((ModelArguments,) ) ((__lowerCAmelCase), ) : int = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: __lowerCAmelCase : Any = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: __lowerCAmelCase : Any = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: __lowerCAmelCase : List[str] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed __lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : Optional[Any] = True 
__lowerCAmelCase : int = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowercase__ , decoder_config=lowercase__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens __lowerCAmelCase : Dict = decoder_config.decoder_start_token_id __lowerCAmelCase : Any = decoder_config.pad_token_id if decoder_start_token_id is None: __lowerCAmelCase : Tuple = decoder_config.bos_token_id if pad_token_id is None: __lowerCAmelCase : Optional[int] = decoder_config.eos_token_id # This is necessary to make Flax's generate() work __lowerCAmelCase : int = decoder_config.eos_token_id __lowerCAmelCase : List[Any] = decoder_start_token_id __lowerCAmelCase : str = pad_token_id __lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) __lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
275
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __lowercase (unittest.TestCase ): _UpperCamelCase = ViTImageProcessor if is_vision_available() else None @property def UpperCamelCase__ ( self ) ->str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Tuple = (3, 32, 128) __lowerCAmelCase : List[str] = tempfile.mkdtemp() # fmt: off __lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __lowerCAmelCase : Optional[int] = dict(zip(A_ , range(len(A_ ) ) ) ) __lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A_ ) + '''\n''' ) __lowerCAmelCase : Union[str, Any] = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: 
json.dump(A_ , A_ ) def UpperCamelCase__ ( self , **A_ ) ->Tuple: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ ) def UpperCamelCase__ ( self , **A_ ) ->Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) return image_input def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Dict = self.get_tokenizer() __lowerCAmelCase : List[Any] = self.get_image_processor() __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Any = self.get_tokenizer() __lowerCAmelCase : Union[str, Any] = self.get_image_processor() __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) __lowerCAmelCase : int = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 ) 
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Any = self.get_image_processor() __lowerCAmelCase : Optional[Any] = self.get_tokenizer() __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : Optional[int] = self.prepare_image_inputs() __lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' ) __lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.get_image_processor() __lowerCAmelCase : Union[str, Any] = self.get_tokenizer() __lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : Any = '''test''' __lowerCAmelCase : Dict = processor(text=A_ ) __lowerCAmelCase : str = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Dict = self.get_image_processor() __lowerCAmelCase : Any = self.get_tokenizer() __lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : List[Any] = '''test''' __lowerCAmelCase : int = self.prepare_image_inputs() __lowerCAmelCase : int = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def UpperCamelCase__ ( self ) ->Union[str, Any]: 
'''simple docstring''' __lowerCAmelCase : List[Any] = self.get_image_processor() __lowerCAmelCase : int = self.get_tokenizer() __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase : Optional[int] = processor.char_decode(A_ ) __lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ ) __lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : str = self.get_image_processor() __lowerCAmelCase : Any = self.get_tokenizer() __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs() __lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : List[str] = self.get_image_processor() __lowerCAmelCase : List[str] = self.get_tokenizer() __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ ) __lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 ) __lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 ) __lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 ) __lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
275
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _lowercase ( lowercase__ ): __lowerCAmelCase : Union[str, Any] = 3_8_4 if "tiny" in model_name: __lowerCAmelCase : Optional[Any] = [3, 3, 9, 3] __lowerCAmelCase : Tuple = [9_6, 1_9_2, 3_8_4, 7_6_8] if "small" in model_name: __lowerCAmelCase : str = [3, 3, 2_7, 3] __lowerCAmelCase : str = [9_6, 1_9_2, 3_8_4, 7_6_8] if "base" in model_name: __lowerCAmelCase : str = [3, 3, 2_7, 3] __lowerCAmelCase : List[str] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4] __lowerCAmelCase : Dict = 5_1_2 if "large" in model_name: __lowerCAmelCase : List[str] = [3, 3, 2_7, 3] __lowerCAmelCase : Dict = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6] __lowerCAmelCase : Union[str, Any] = 7_6_8 if "xlarge" in model_name: __lowerCAmelCase : Any = [3, 3, 2_7, 3] __lowerCAmelCase : str = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] __lowerCAmelCase : str = 1_0_2_4 # set label information __lowerCAmelCase : Optional[Any] = 1_5_0 __lowerCAmelCase : int = '''huggingface/label-files''' __lowerCAmelCase : str = '''ade20k-id2label.json''' __lowerCAmelCase : Tuple = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) ) __lowerCAmelCase : str = {int(lowercase__ ): v for k, v in idalabel.items()} __lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} __lowerCAmelCase : str = ConvNextConfig( depths=lowercase__ , hidden_sizes=lowercase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) __lowerCAmelCase : List[Any] = UperNetConfig( backbone_config=lowercase__ , auxiliary_in_channels=lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ , ) return config def _lowercase ( lowercase__ ): __lowerCAmelCase : Optional[Any] = [] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', 
'''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") ) if i > 0: rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") ) 
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Optional[Any] = dct.pop(lowercase__ ) __lowerCAmelCase : List[str] = val def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Dict = { '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''', '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''', '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''', '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''', '''upernet-convnext-xlarge''': 
'''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''', } __lowerCAmelCase : str = model_name_to_url[model_name] __lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(lowercase__ , map_location='''cpu''' )['''state_dict'''] __lowerCAmelCase : Optional[int] = get_upernet_config(lowercase__ ) __lowerCAmelCase : List[Any] = UperNetForSemanticSegmentation(lowercase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __lowerCAmelCase : str = state_dict.pop(lowercase__ ) if "bn" in key: __lowerCAmelCase : Tuple = key.replace('''bn''' , '''batch_norm''' ) __lowerCAmelCase : Dict = val # rename keys __lowerCAmelCase : Tuple = create_rename_keys(lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) model.load_state_dict(lowercase__ ) # verify on image __lowerCAmelCase : Dict = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' __lowerCAmelCase : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('''RGB''' ) __lowerCAmelCase : Optional[int] = SegformerImageProcessor() __lowerCAmelCase : List[str] = processor(lowercase__ , return_tensors='''pt''' ).pixel_values with torch.no_grad(): __lowerCAmelCase : int = model(lowercase__ ) if model_name == "upernet-convnext-tiny": __lowerCAmelCase : str = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ) elif model_name == "upernet-convnext-small": __lowerCAmelCase : List[str] = torch.tensor( [[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] ) elif model_name == "upernet-convnext-base": __lowerCAmelCase : Optional[Any] = torch.tensor( [[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], 
[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] ) elif model_name == "upernet-convnext-large": __lowerCAmelCase : int = torch.tensor( [[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] ) elif model_name == "upernet-convnext-xlarge": __lowerCAmelCase : List[Any] = torch.tensor( [[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] ) print('''Logits:''' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowercase__ ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[F"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _UpperCamelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
275
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


# Pin all torch/cuDNN nondeterminism so the pixel-slice assertions below are reproducible.
enable_full_determinism()


class __lowercase (unittest.TestCase ):
    # Fast PNDM pipeline smoke test built on a tiny, randomly initialised UNet (no downloads).
    #
    # NOTE(review): identifier-obfuscation artifacts throughout this class — both test
    # methods share the name ``UpperCamelCase__`` (the later definition shadows the
    # earlier one), locals are assigned to ``__lowerCAmelCase`` but read back under
    # their pre-obfuscation names (``model``, ``pndm``, ``image`` …), and ``A_`` is
    # passed where concrete arguments belong. The code cannot run as written; it is
    # left byte-identical here (comments/annotations only).

    @property
    def UpperCamelCase__ ( self ) ->"UNetaDModel":
        '''Build a small, deterministically seeded UNet2D so the test is fast and offline.'''
        torch.manual_seed(0 )
        # 32x32, 3-channel toy model: two down blocks / two up blocks, attention in the middle.
        __lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def UpperCamelCase__ ( self ) ->None:
        '''Run the tiny pipeline twice (dict and tuple return paths) and compare a pixel slice.'''
        __lowerCAmelCase : List[str] = self.dummy_uncond_unet
        __lowerCAmelCase : Any = PNDMScheduler()
        __lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        # Same generator seed for both runs so the outputs must match exactly.
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        # return_dict=False path returns a plain tuple; images are element 0.
        __lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0]
        # Compare only the bottom-right 3x3 patch of the last channel (cheap golden check).
        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __lowercase (unittest.TestCase ):
    # Full-size integration test against the public ``google/ddpm-cifar10-32`` weights.
    # Marked @slow: downloads a checkpoint and runs a complete sampling loop.
    # NOTE(review): same obfuscation artifacts as the class above — not runnable as-is.

    def UpperCamelCase__ ( self ) ->None:
        '''Sample from the pretrained CIFAR-10 UNet with the PNDM scheduler and check a slice.'''
        __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ )
        __lowerCAmelCase : int = PNDMScheduler()
        __lowerCAmelCase : Any = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Tuple = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        # Golden values recorded from a known-good run of this checkpoint + scheduler.
        __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
275
1
import torch

from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class __lowercase (_UpperCAmelCase ):
    # Unit tests for the KDPM2 discrete scheduler.
    #
    # NOTE(review): identifier-obfuscation artifacts — the base-class name
    # ``_UpperCAmelCase`` is undefined (presumably ``SchedulerCommonTest``, which IS
    # imported above), the two class attributes below both bind ``_UpperCamelCase``
    # (the second assignment clobbers the first; presumably ``scheduler_classes`` and
    # ``num_inference_steps``), every method shares the name ``UpperCamelCase__``, and
    # locals assigned to ``__lowerCAmelCase`` are read back under their original names.
    # Code left byte-identical; comments/annotations only.
    _UpperCamelCase = (KDPMaDiscreteScheduler,)
    _UpperCamelCase = 10

    def UpperCamelCase__ ( self , **A_ ) ->dict:
        '''Return the default scheduler config, updated with any keyword overrides.'''
        __lowerCAmelCase : List[Any] = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**A_ )
        return config

    def UpperCamelCase__ ( self ) ->None:
        '''Smoke-test several training-timestep counts.'''
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=A_ )

    def UpperCamelCase__ ( self ) ->None:
        '''Smoke-test several (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=A_ , beta_end=A_ )

    def UpperCamelCase__ ( self ) ->None:
        '''Smoke-test both supported beta schedules.'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=A_ )

    def UpperCamelCase__ ( self ) ->None:
        '''Smoke-test both prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A_ )

    def UpperCamelCase__ ( self ) ->None:
        '''Run a full denoising loop with v_prediction and check sum/mean golden values.'''
        __lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        __lowerCAmelCase : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
        __lowerCAmelCase : Optional[Any] = scheduler_class(**A_ )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowerCAmelCase : Union[str, Any] = self.dummy_model()
        # Scale the deterministic sample to the scheduler's initial noise level.
        __lowerCAmelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowerCAmelCase : List[Any] = sample.to(A_ )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(A_ , A_ )
            __lowerCAmelCase : int = model(A_ , A_ )
            __lowerCAmelCase : List[str] = scheduler.step(A_ , A_ , A_ )
            __lowerCAmelCase : Optional[Any] = output.prev_sample
        __lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
        __lowerCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
        # Golden values differ slightly per backend due to float accumulation order.
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1_112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_428_650_170_972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0_002 ) < 1e-3

    def UpperCamelCase__ ( self ) ->None:
        '''Run a full denoising loop with the default (epsilon) config.'''
        if torch_device == "mps":
            return
        __lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        __lowerCAmelCase : int = self.get_scheduler_config()
        __lowerCAmelCase : Optional[Any] = scheduler_class(**A_ )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowerCAmelCase : Union[str, Any] = self.dummy_model()
        __lowerCAmelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowerCAmelCase : List[str] = sample.to(A_ )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase : str = scheduler.scale_model_input(A_ , A_ )
            __lowerCAmelCase : Optional[int] = model(A_ , A_ )
            __lowerCAmelCase : Optional[int] = scheduler.step(A_ , A_ , A_ )
            __lowerCAmelCase : Any = output.prev_sample
        __lowerCAmelCase : List[str] = torch.sum(torch.abs(A_ ) )
        __lowerCAmelCase : List[str] = torch.mean(torch.abs(A_ ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3

    def UpperCamelCase__ ( self ) ->None:
        '''Full loop with the timestep schedule placed on the target device.'''
        if torch_device == "mps":
            return
        __lowerCAmelCase : List[str] = self.scheduler_classes[0]
        __lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCAmelCase : List[str] = scheduler_class(**A_ )
        scheduler.set_timesteps(self.num_inference_steps , device=A_ )
        __lowerCAmelCase : List[str] = self.dummy_model()
        __lowerCAmelCase : List[str] = self.dummy_sample_deter.to(A_ ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            __lowerCAmelCase : int = scheduler.scale_model_input(A_ , A_ )
            __lowerCAmelCase : List[str] = model(A_ , A_ )
            __lowerCAmelCase : Any = scheduler.step(A_ , A_ , A_ )
            __lowerCAmelCase : Tuple = output.prev_sample
        __lowerCAmelCase : Any = torch.sum(torch.abs(A_ ) )
        __lowerCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
        if str(A_ ).startswith('''cpu''' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4_125 ) < 1e-2
            assert abs(result_mean.item() - 0.0_266 ) < 1e-3
275
import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by how many positions match ``main_target``.

    Returns the pair ``(item, score)``; assumes ``len(main_target) >= len(item)``.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at one random point and swap the tails.

    The original definition declared the same parameter name twice (a
    SyntaxError); names are restored here.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``.

    The original dropped the indexed assignment (it bound ``random.choice`` to a
    throwaway local), so mutation never happened; restored here.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children from ``parent_1`` and random mates, proportional to fitness.

    ``parent_1`` is a (string, normalized-score) pair; returns the new children.
    """
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10).
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # Mate is drawn from the best N_SELECTED individuals.
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one equals ``target``.

    Returns ``(generation, total_population, best_string)``.
    Raises ValueError if the constants are inconsistent or ``target`` uses
    characters outside ``genes``.
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know it is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
275
1
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k±1 trial division.

    The obfuscated original read ``number`` while its parameter was named
    differently (a NameError); the parameter name is restored here.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1, so only those divisors are tried.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order, starting from 2."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7)."""
    # islice advances the infinite generator to the (nth-1)-indexed element.
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
275
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "spiece.model"} _UpperCamelCase = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } _UpperCamelCase = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase : int = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __lowerCAmelCase : Union[str, Any] = '''None''' # Default definitions 
for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token __lowerCAmelCase : int = eos_token if bos_token is None else bos_token else: __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __lowerCAmelCase : Union[str, Any] = do_lower_case __lowerCAmelCase : Union[str, Any] = remove_space __lowerCAmelCase : int = keep_accents __lowerCAmelCase : Union[str, Any] = vocab_file __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCAmelCase : int = re.compile( f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.__dict__.copy() __lowerCAmelCase : List[Any] = None return state def __setstate__( self , A_ ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase : List[Any] = {} __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ ) # Normalize whitespaces __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ ) return text def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' return self.sp_model.PieceToId(A_ ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def UpperCamelCase__ ( A_ ) ->str: '''simple docstring''' return out_string def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : str = [] __lowerCAmelCase : Tuple = '''''' __lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in 
self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(A_ ) __lowerCAmelCase : str = False out_string += self.sp_model.decode(A_ ) return out_string def UpperCamelCase__ ( self ) ->Dict[str, int]: '''simple docstring''' __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(A_ , A_ ): __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ ) __lowerCAmelCase : Dict = self.sp_model.encode(A_ ) else: __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text] __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": __lowerCAmelCase : Tuple = torch.tensor(A_ ) return token_ids def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.decode(A_ ) def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple 
docstring''' __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCAmelCase : Any = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
275
1
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer _UpperCamelCase = logging.get_logger(__name__) class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """AutoTokenizer""" _UpperCamelCase = ["""tokenizer"""] _UpperCamelCase = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , A_ , A_=None ) ->int: '''simple docstring''' super().__init__(A_ ) __lowerCAmelCase : str = speaker_embeddings @classmethod def UpperCamelCase__ ( cls , A_ , A_="speaker_embeddings_path.json" , **A_ ) ->Optional[int]: '''simple docstring''' if speaker_embeddings_dict_path is not None: __lowerCAmelCase : int = get_file_from_repo( A_ , A_ , subfolder=kwargs.pop('''subfolder''' , A_ ) , cache_dir=kwargs.pop('''cache_dir''' , A_ ) , force_download=kwargs.pop('''force_download''' , A_ ) , proxies=kwargs.pop('''proxies''' , A_ ) , resume_download=kwargs.pop('''resume_download''' , A_ ) , local_files_only=kwargs.pop('''local_files_only''' , A_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , A_ ) , revision=kwargs.pop('''revision''' , A_ ) , ) if speaker_embeddings_path is None: logger.warning( f"""`{os.path.join(A_ , A_ )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" ) __lowerCAmelCase : Any = None else: with open(A_ ) as speaker_embeddings_json: __lowerCAmelCase : Optional[Any] = json.load(A_ ) else: __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : str = AutoTokenizer.from_pretrained(A_ , **A_ ) return cls(tokenizer=A_ , speaker_embeddings=A_ ) def UpperCamelCase__ ( self , A_ , A_="speaker_embeddings_path.json" , A_="speaker_embeddings" , A_ = False , **A_ , ) ->Dict: 
'''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(A_ , A_ , '''v2''' ) , exist_ok=A_ ) __lowerCAmelCase : Any = {} __lowerCAmelCase : Tuple = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": __lowerCAmelCase : Optional[Any] = self._load_voice_preset(A_ ) __lowerCAmelCase : List[Any] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , A_ , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=A_ , ) __lowerCAmelCase : Union[str, Any] = os.path.join(A_ , f"""{prompt_key}_{key}.npy""" ) __lowerCAmelCase : Tuple = tmp_dict with open(os.path.join(A_ , A_ ) , '''w''' ) as fp: json.dump(A_ , A_ ) super().save_pretrained(A_ , A_ , **A_ ) def UpperCamelCase__ ( self , A_ = None , **A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = self.speaker_embeddings[voice_preset] __lowerCAmelCase : Optional[Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" ) __lowerCAmelCase : Tuple = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , A_ ) , cache_dir=kwargs.pop('''cache_dir''' , A_ ) , force_download=kwargs.pop('''force_download''' , A_ ) , proxies=kwargs.pop('''proxies''' , A_ ) , resume_download=kwargs.pop('''resume_download''' , A_ ) , local_files_only=kwargs.pop('''local_files_only''' , A_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , A_ ) , revision=kwargs.pop('''revision''' , A_ ) , ) if path is None: raise ValueError( f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} 
embeddings.""" ) __lowerCAmelCase : Optional[int] = np.load(A_ ) return voice_preset_dict def UpperCamelCase__ ( self , A_ = None ) ->str: '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) def __call__( self , A_=None , A_=None , A_="pt" , A_=256 , A_=False , A_=True , A_=False , **A_ , ) ->Tuple: '''simple docstring''' if voice_preset is not None and not isinstance(A_ , A_ ): if ( isinstance(A_ , A_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): __lowerCAmelCase : Dict = self._load_voice_preset(A_ ) else: if isinstance(A_ , A_ ) and not voice_preset.endswith('''.npz''' ): __lowerCAmelCase : Dict = voice_preset + '''.npz''' __lowerCAmelCase : int = np.load(A_ ) if voice_preset is not None: self._validate_voice_preset_dict(A_ , **A_ ) __lowerCAmelCase : Optional[Any] = BatchFeature(data=A_ , tensor_type=A_ ) __lowerCAmelCase : Union[str, Any] = self.tokenizer( A_ , return_tensors=A_ , padding='''max_length''' , max_length=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , add_special_tokens=A_ , **A_ , ) if voice_preset is not None: __lowerCAmelCase : Optional[Any] = voice_preset return encoded_text
275
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """table-transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , A_=True , A_=None , A_=3 , A_=100 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=2048 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , **A_ , ) ->Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) __lowerCAmelCase : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A_ , A_ ): __lowerCAmelCase : int = backbone_config.get('''model_type''' ) __lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase : Any = config_class.from_dict(A_ ) # set timm attributes to None __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : List[str] = None, None, None __lowerCAmelCase : Tuple = use_timm_backbone __lowerCAmelCase : Optional[Any] = backbone_config __lowerCAmelCase : List[str] = num_channels __lowerCAmelCase : Tuple = num_queries __lowerCAmelCase : int = d_model __lowerCAmelCase : List[Any] = encoder_ffn_dim __lowerCAmelCase : Optional[int] = encoder_layers __lowerCAmelCase : List[str] = encoder_attention_heads __lowerCAmelCase : str = decoder_ffn_dim __lowerCAmelCase : Union[str, Any] = decoder_layers __lowerCAmelCase : Any = decoder_attention_heads __lowerCAmelCase : Optional[int] = dropout __lowerCAmelCase : Any = attention_dropout __lowerCAmelCase : Tuple = activation_dropout __lowerCAmelCase : Optional[Any] = activation_function __lowerCAmelCase : List[str] = init_std __lowerCAmelCase : Tuple = init_xavier_std __lowerCAmelCase : Any = encoder_layerdrop __lowerCAmelCase : List[Any] = decoder_layerdrop __lowerCAmelCase : Optional[Any] = encoder_layers __lowerCAmelCase : Optional[Any] = auxiliary_loss __lowerCAmelCase : Optional[Any] = position_embedding_type __lowerCAmelCase : Tuple = backbone __lowerCAmelCase : Any = use_pretrained_backbone __lowerCAmelCase : int = dilation # Hungarian matcher __lowerCAmelCase : Dict = class_cost __lowerCAmelCase : List[str] = bbox_cost __lowerCAmelCase : int = giou_cost # Loss coefficients __lowerCAmelCase : Optional[Any] = mask_loss_coefficient __lowerCAmelCase : Tuple = dice_loss_coefficient __lowerCAmelCase : int = bbox_loss_coefficient __lowerCAmelCase : List[Any] = giou_loss_coefficient 
__lowerCAmelCase : int = eos_coefficient super().__init__(is_encoder_decoder=A_ , **A_ ) @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.d_model class __lowercase (_UpperCAmelCase ): _UpperCamelCase = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def UpperCamelCase__ ( self ) ->float: '''simple docstring''' return 1e-5 @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return 12
275
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _UpperCamelCase = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["PoolFormerFeatureExtractor"] _UpperCamelCase = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
275
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Any = global_rng __lowerCAmelCase : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : str = min_seq_length __lowerCAmelCase : int = max_seq_length __lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Any = padding_value __lowerCAmelCase : str = sampling_rate __lowerCAmelCase : Optional[Any] = return_attention_mask __lowerCAmelCase : Optional[Any] = do_normalize __lowerCAmelCase : Optional[Any] = feature_size __lowerCAmelCase : Optional[int] = chunk_length __lowerCAmelCase : Optional[Any] = hop_length def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, 
"do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0] check_json_file_has_correct_format(A_ ) __lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ ) __lowerCAmelCase : Dict = feat_extract_first.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters __lowerCAmelCase : Dict = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A_ ) __lowerCAmelCase : List[str] = 
self.feature_extraction_class.from_json_file(A_ ) __lowerCAmelCase : List[str] = feat_extract_first.to_dict() __lowerCAmelCase : Tuple = feat_extract_second.to_dict() __lowerCAmelCase : Any = feat_extract_first.mel_filters __lowerCAmelCase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : Optional[int] = np.asarray(A_ ) __lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] __lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated] __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' import torch __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) __lowerCAmelCase : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , 
split='''validation''' ) # automatic decoding with librispeech __lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on __lowerCAmelCase : int = self._load_datasamples(1 ) __lowerCAmelCase : Any = WhisperFeatureExtractor() __lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = self._load_datasamples(1 )[0] __lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue __lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0] self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
275
1
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} _UpperCamelCase = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } _UpperCamelCase = { "abeja/gpt-neox-japanese-2.7b": 2048, } def _lowercase ( lowercase__ , lowercase__ ): with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f: __lowerCAmelCase : int = json.loads(f.read() ) __lowerCAmelCase : List[Any] = collections.OrderedDict() __lowerCAmelCase : List[Any] = collections.OrderedDict() __lowerCAmelCase : int = collections.OrderedDict() with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f: __lowerCAmelCase : Optional[int] = f.readlines() __lowerCAmelCase : str = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token] for idx, b in enumerate(lowercase__ ): __lowerCAmelCase : List[str] = b __lowerCAmelCase : Tuple = idx for wd in b: __lowerCAmelCase : List[str] = idx return vocab, raw_vocab, ids_to_tokens, emoji class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_ , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|startoftext|>" , A_="<|endoftext|>" , A_=False , **A_ , ) ->Any: '''simple docstring''' super().__init__( unk_token=A_ , pad_token=A_ , bos_token=A_ , 
eos_token=A_ , do_clean_text=A_ , **A_ , ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) if not os.path.isfile(A_ ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google""" ''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) __lowerCAmelCase : Optional[Any] = do_clean_text __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = load_vocab_and_emoji(A_ , A_ ) __lowerCAmelCase : int = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' return len(self.raw_vocab ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def UpperCamelCase__ ( self , A_ ) ->Tuple: '''simple docstring''' return self.subword_tokenizer.tokenize(A_ , clean=self.do_clean_text ) def UpperCamelCase__ ( self , A_ ) ->Optional[int]: '''simple docstring''' return self.vocab.get(A_ , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , A_ ) ->Any: '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(A_ ) def UpperCamelCase__ ( self , A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Dict = ''''''.join(A_ ).strip() return out_string def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple docstring''' __lowerCAmelCase : Optional[int] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] ) if len(A_ ) > self.model_max_length: __lowerCAmelCase : Optional[Any] = input_ids[-self.model_max_length :] return input_ids def UpperCamelCase__ ( self 
, A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = 0 if os.path.isdir(A_ ): __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] ) else: __lowerCAmelCase : int = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase : List[Any] = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file'''] ) with open(A_ , '''w''' , encoding='''utf-8''' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) __lowerCAmelCase : int = token_index writer.write(''','''.join(A_ ) + '''\n''' ) index += 1 with open(A_ , '''w''' , encoding='''utf-8''' ) as writer: json.dump(self.emoji , A_ ) return vocab_file, emoji_file class __lowercase (_UpperCAmelCase ): def __init__( self , A_ , A_ , A_ ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : int = vocab # same as swe __lowerCAmelCase : Optional[Any] = ids_to_tokens # same as bpe __lowerCAmelCase : int = emoji __lowerCAmelCase : Optional[int] = np.max([len(A_ ) for w in self.vocab.keys()] ) __lowerCAmelCase : int = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' ) __lowerCAmelCase : Optional[int] = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' ) __lowerCAmelCase : Optional[int] = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' ) __lowerCAmelCase : int = re.compile( 
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) __lowerCAmelCase : str = re.compile( R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) __lowerCAmelCase : Tuple = re.compile( R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' ) __lowerCAmelCase : Union[str, Any] = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿''' __lowerCAmelCase : Union[str, Any] = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟''' __lowerCAmelCase : Union[str, Any] = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} ) def __len__( self ) ->Any: '''simple docstring''' return len(self.ids_to_tokens ) def UpperCamelCase__ ( self , A_ ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : str = self.content_repattera.sub('''<URL>''' , A_ ) __lowerCAmelCase : List[Any] = self.content_repattera.sub('''<EMAIL>''' , A_ ) __lowerCAmelCase : List[str] = self.content_repattera.sub('''<TEL>''' , A_ ) __lowerCAmelCase : str = self.content_repattera.sub('''<DATE>''' , A_ ) __lowerCAmelCase : int = self.content_repattera.sub('''<DATE>''' , A_ ) __lowerCAmelCase : Any = self.content_repattera.sub('''<PRICE>''' , A_ ) __lowerCAmelCase : List[Any] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowerCAmelCase : str = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' ) return content def UpperCamelCase__ ( self , A_ , A_=False ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : str = text.replace(''' ''' , '''<SP>''' ) __lowerCAmelCase : List[Any] = text.replace(''' ''' , 
'''<SP>''' ) __lowerCAmelCase : Any = text.replace('''\r\n''' , '''<BR>''' ) __lowerCAmelCase : str = text.replace('''\n''' , '''<BR>''' ) __lowerCAmelCase : Union[str, Any] = text.replace('''\r''' , '''<BR>''' ) __lowerCAmelCase : Union[str, Any] = text.replace('''\t''' , '''<TAB>''' ) __lowerCAmelCase : Optional[Any] = text.replace('''—''' , '''ー''' ) __lowerCAmelCase : List[Any] = text.replace('''−''' , '''ー''' ) for k, v in self.emoji["emoji"].items(): if k in text: __lowerCAmelCase : List[Any] = text.replace(A_ , A_ ) if clean: __lowerCAmelCase : List[Any] = self.clean_text(A_ ) def check_simbol(A_ ): __lowerCAmelCase : List[str] = x.encode() if len(A_ ) == 1 and len(A_ ) == 2: __lowerCAmelCase : Optional[Any] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC_2_A_1 and c <= 0XC_2_B_F) or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3) or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F) or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2) ): return True return False def checkuae(A_ ): __lowerCAmelCase : int = x.encode() if len(A_ ) == 1 and len(A_ ) == 3: __lowerCAmelCase : List[str] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F: return True return False __lowerCAmelCase : List[Any] = 0 __lowerCAmelCase : Union[str, Any] = [] while pos < len(A_ ): __lowerCAmelCase : Optional[int] = min(len(A_ ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3 __lowerCAmelCase : Tuple = [] # (token_id, token, pos) for e in range(A_ , A_ , -1 ): __lowerCAmelCase : Optional[Any] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(A_ ) > 2: __lowerCAmelCase : int = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(A_ ) > 0: # the smallest token_id is adopted __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : str = sorted(A_ , key=lambda A_ : x[0] )[0] result.append(A_ ) __lowerCAmelCase : Optional[int] = e else: __lowerCAmelCase : List[Any] = pos + 1 __lowerCAmelCase : List[str] = text[pos:end] if 
check_simbol(A_ ): result.append('''<KIGOU>''' ) elif checkuae(A_ ): result.append('''<U2000U2BFF>''' ) else: for i in wd.encode('''utf-8''' ): result.append('''<|byte%d|>''' % i ) __lowerCAmelCase : str = end return result def UpperCamelCase__ ( self , A_ , A_="\n" ) ->int: '''simple docstring''' __lowerCAmelCase : List[Any] = [] __lowerCAmelCase : Optional[int] = [] __lowerCAmelCase : Union[str, Any] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(A_ ) > 0: words.append(bytearray(A_ ).decode('''utf-8''' , errors='''replace''' ) ) __lowerCAmelCase : Optional[Any] = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['''emoji_inv'''][word] ) elif word == "<SP>": words.append(''' ''' ) elif word == "<BR>": words.append(A_ ) elif word == "<TAB>": words.append('''\t''' ) elif word == "<BLOCK>": words.append('''▀''' ) elif word == "<KIGOU>": words.append('''ǀ''' ) elif word == "<U2000U2BFF>": words.append('''‖''' ) else: words.append(A_ ) if len(A_ ) > 0: words.append(bytearray(A_ ).decode('''utf-8''' , errors='''replace''' ) ) __lowerCAmelCase : str = ''''''.join(A_ ) return text
275
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } _UpperCamelCase = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def _lowercase ( lowercase__ ): __lowerCAmelCase : List[str] = {} with open(lowercase__ , '''r''' ) as file: for line_number, line in enumerate(lowercase__ ): __lowerCAmelCase : Any = line.strip() if line: __lowerCAmelCase : Dict = line.split() __lowerCAmelCase : str = line_number __lowerCAmelCase : List[str] = words[0] __lowerCAmelCase : Any = value return result def _lowercase ( lowercase__ , 
lowercase__ , lowercase__ , lowercase__ , lowercase__ ): for attribute in key.split('''.''' ): __lowerCAmelCase : List[Any] = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : List[Any] = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : str = getattr(lowercase__ , lowercase__ ).shape elif weight_type is not None and weight_type == "param": __lowerCAmelCase : Dict = hf_pointer for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : str = shape_pointer.shape # let's reduce dimension __lowerCAmelCase : Any = value[0] else: __lowerCAmelCase : str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[str] = value elif weight_type == "weight_v": __lowerCAmelCase : int = value elif weight_type == "bias": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ ) __lowerCAmelCase : Tuple = value else: __lowerCAmelCase : Any = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __lowerCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]] __lowerCAmelCase : int = '''param''' if weight_type is not None and weight_type != "param": __lowerCAmelCase : Tuple = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __lowerCAmelCase : List[str] = '''.'''.join([key, hf_param_name] ) else: __lowerCAmelCase : Optional[int] = key __lowerCAmelCase : Union[str, Any] = value if '''lm_head''' in full_key else value[0] _UpperCamelCase = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): __lowerCAmelCase : Any = False for key, mapped_key in MAPPING.items(): __lowerCAmelCase : Tuple = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCAmelCase : Optional[Any] = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(lowercase__ )[0].split('''.''' )[-2] __lowerCAmelCase : Dict = mapped_key.replace('''*''' , lowercase__ ) if "weight_g" in name: __lowerCAmelCase : List[Any] = '''weight_g''' elif "weight_v" in name: __lowerCAmelCase : List[Any] = '''weight_v''' elif "bias" in name: __lowerCAmelCase : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : int = '''weight''' else: __lowerCAmelCase : Any = None if hf_dict is not None: rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , 
lowercase__ ) return is_used return is_used def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Optional[Any] = fairseq_model.state_dict() __lowerCAmelCase : Tuple = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Any = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCAmelCase : int = True else: __lowerCAmelCase : Dict = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ ) if not is_used: unused_weights.append(lowercase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : Any = full_name.split('''conv_layers.''' )[-1] __lowerCAmelCase : List[str] = name.split('''.''' ) __lowerCAmelCase : Any = int(items[0] ) __lowerCAmelCase : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise 
ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __lowerCAmelCase : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowerCAmelCase : Optional[int] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False ): if config_path is not None: __lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(lowercase__ ) else: __lowerCAmelCase : Optional[int] = WavaVecaConfig() if is_seq_class: __lowerCAmelCase : Optional[Any] = read_txt_into_dict(lowercase__ ) __lowerCAmelCase : int = idalabel __lowerCAmelCase : Optional[int] = WavaVecaForSequenceClassification(lowercase__ ) __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) feature_extractor.save_pretrained(lowercase__ ) elif is_finetuned: if dict_path: __lowerCAmelCase : List[str] = Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowerCAmelCase : List[Any] = target_dict.pad_index __lowerCAmelCase : List[Any] = target_dict.bos_index __lowerCAmelCase : Optional[int] = target_dict.eos_index __lowerCAmelCase : Any = len(target_dict.symbols ) __lowerCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''vocab.json''' ) if not 
os.path.isdir(lowercase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) __lowerCAmelCase : Optional[int] = target_dict.indices # fairseq has the <pad> and <s> switched __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : int = 1 with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) __lowerCAmelCase : Dict = WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , ) __lowerCAmelCase : List[str] = True if config.feat_extract_norm == '''layer''' else False __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) __lowerCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __lowerCAmelCase : str = WavaVecaForCTC(lowercase__ ) else: __lowerCAmelCase : Any = WavaVecaForPreTraining(lowercase__ ) if is_finetuned or is_seq_class: __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCAmelCase : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) __lowerCAmelCase : str = fairseq.tasks.setup_task(lowercase__ ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ ) __lowerCAmelCase : int = model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowercase__ ) if __name__ == "__main__": 
_UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
275
1
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class __lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LXMERT (slow and fast tokenizers).

    BUG FIX: the mixin base was the undefined name `_UpperCAmelCase`; the only
    tokenizer-test mixin imported by this module is `TokenizerTesterMixin`.
    The class attributes and methods below all collided on a single shadowed
    name; they are restored to the names the mixin's machinery requires
    (`tokenizer_class`, `setUp`, `get_input_output_texts`, `test_*`).
    """

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    # NOTE(review): the original had two further boolean flags set to True; the
    # conventional fourth flag for this suite is `space_between_special_tokens`
    # — confirm against the upstream test file.
    space_between_special_tokens = True

    def setUp(self):
        """Write a small WordPiece vocabulary for the tests to load."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, normalized) text pair used by the mixin's round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        # Slow and fast tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
275
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the logger and this map previously collided on one module-level
# name (the logger was immediately shadowed); they now have distinct names.
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class __lowercase(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    BUG FIXES: the base class was the undefined name `_UpperCAmelCase` (the
    only config base imported here is `PretrainedConfig`, whose token-id
    kwargs are forwarded below); the three class attributes collided on one
    shadowed name; and every `__init__` parameter was named `A_`, which is a
    SyntaxError (duplicate argument names). Parameter names are restored from
    the attribute assignments in the body.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
275
1
from cmath import sqrt

__all__ = ["_lowercase", "main"]


def _lowercase(a, b, c):
    """Return the two roots of a*x**2 + b*x + c = 0.

    Complex roots are returned as complex numbers; purely real roots are
    returned as floats.

    BUG FIX: all three parameters were previously named `lowercase__`
    (a SyntaxError: duplicate argument names).

    :param a: quadratic coefficient, must be non-zero
    :param b: linear coefficient
    :param c: constant term
    :raises ValueError: if ``a`` is zero
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    # cmath.sqrt always yields complex; strip the imaginary part when it is zero.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    """Demo: solve 5x^2 + 6x + 1 = 0 and print the solutions.

    BUG FIX: the results were previously unpacked into a single throwaway name
    while the f-string referenced the undefined `solutiona`, and no `main`
    existed for the `__main__` guard to call.
    """
    solution_1, solution_2 = _lowercase(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
275
__all__ = ["_lowercase"]


def _lowercase(a, b):
    """Return the bitwise OR of two non-negative integers as a binary string
    (with the ``0b`` prefix, e.g. ``'0b111001'``).

    BUG FIX: both parameters were previously named `lowercase__` (a
    SyntaxError: duplicate argument names).  The manual zfill/zip string walk
    is replaced by the equivalent built-in ``bin(a | b)``.

    :raises ValueError: if either input is negative
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    return bin(a | b)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
275
1
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    """Build and parse the CLI arguments for TPU masked-LM training.

    BUG FIX: every function in this script shared the name `_lowercase`
    (each definition shadowing the previous one) while the call sites used
    the real names (`parse_args`, `initialize_tpu`, `count_samples`,
    `prepare_dataset`, `main`); the real names are restored.
    """
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    """Resolve and initialize the TPU cluster described by the CLI args."""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    """Sum the sample counts encoded in shard filenames like `...-N-COUNT.tfrecord`."""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched tf.data pipeline over TFRecord shards.

    BUG FIX: all six parameters were previously named `lowercase__`
    (a SyntaxError); names are restored from the keyword call sites in `main`.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        # NOTE(review): reads the module-level `args` rather than the
        # `shuffle_buffer_size` parameter — preserved as observed in source.
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    """Train a masked language model on TPU (or single GPU with --no_tpu)."""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    # BUG FIX: the flag attribute is `args.bfloat16` (`args.bfloataa` does not exist).
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # Initialize the model's vocab size from the tokenizer (see --tokenizer help).
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    # BUG FIX: the parse result was bound to a throwaway name while `main(args)`
    # referenced the undefined `args`.
    args = parse_args()
    main(args)
275
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute the (height, width) to resize `input_image` to.

    Scales toward `output_size`, optionally preserving aspect ratio by picking
    the scale closest to 1, and rounds each side to a multiple of `multiple`.

    BUG FIX: all four parameters (and the inner helper's parameters) were
    previously named `lowercase__` (a SyntaxError: duplicate argument names);
    the name is restored from the keyword call site in the processor's
    `resize` method below.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then clamp into [min_val, max_val]
        # while staying on the multiple grid.
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class __lowercase(BaseImageProcessor):
    """Image processor for DPT-style models: resize / rescale / normalize.

    BUG FIXES: the base class was the undefined name `_UpperCAmelCase` (the
    only processor base imported here is `BaseImageProcessor`); all methods
    previously collided on one shadowed name — real names are restored from
    the `self.resize` / `self.rescale` / `self.normalize` call sites in
    `preprocess`; duplicate `A_` parameter names (a SyntaxError) are replaced
    by the names their bodies use.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size`, optionally keeping aspect ratio and
        snapping both sides to a multiple of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/rescale/normalize pipeline to one or
        more images and return a `BatchFeature` with `pixel_values`.

        Any argument left as None falls back to the value stored on `self`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # BUG FIX: `do_resize and size is None or resample is None` parsed as
        # `(do_resize and size is None) or resample is None`, raising whenever
        # resample was None even with resizing disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            # NOTE(review): keep_aspect_ratio / ensure_multiple_of are resolved
            # above but not forwarded here — preserved as observed in source.
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model logits to per-image semantic segmentation maps,
        optionally resizing each map to the corresponding `target_sizes` entry."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
275
1
from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A singly-linked-list node.

    BUG FIX: both classes in this module were named `__lowercase` and the
    dataclass fields collided on one name; the real names are restored from
    the `Node(...)`, `SortedLinkedList`, `merge_lists` and `SSL` call sites.
    """

    data: int
    next_node: "Node | None"


class SortedLinkedList:
    """A singly linked list whose elements are kept in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        # Insert in descending order at the head, yielding an ascending list.
        self.head: "Node | None" = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        # BUG FIX: the original joined `str(A_)` (an undefined name) instead
        # of the loop variable.
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
275
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __lowercase(unittest.TestCase):
    """Unit tests for the Vector / Matrix linear-algebra library.

    BUG FIX: every test method previously shared the single name
    `UpperCamelCase__`, so each definition shadowed the previous one and
    unittest (which discovers methods by the `test_` prefix) would have run
    none of them; each test now has a unique `test_*` name.
    """

    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # the empty constructor must also work

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # BUG FIX: the original called assertEqual(7, ..., 0.01), silently
        # passing 0.01 as the failure *message*; the value is exact.
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
275
1
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

# Resize to 256x256 and normalize to [-1, 1]; named `trans` as the call site
# in `preprocess` requires.
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a PIL image / list of PIL images to a batched tensor; pass
    tensors through unchanged.

    BUG FIX: the parameter was renamed to a throwaway while the body referenced
    the undefined name `image`; the function is named `preprocess` as its call
    site in `__call__` requires.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class __lowercase(DiffusionPipeline):
    """DDIM image-to-image pipeline: noise an input image to an intermediate
    timestep, then denoise it with the unet.

    BUG FIX: the base class was the undefined name `_UpperCAmelCase`; the only
    pipeline base imported by this module is `DiffusionPipeline`, whose
    `register_modules` / `progress_bar` / `numpy_to_pil` helpers are used
    below.  Method names are restored from their call sites in `__call__`.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that `strength` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Trim the scheduler's timesteps to the final `strength` fraction."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the preprocessed image to the target device/dtype and add
        scheduler noise at `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
275
def _lowercase ( lowercase__ , lowercase__ ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
275
1
from ....configuration_utils import PretrainedConfig from ....utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """trajectory_transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int: '''simple docstring''' __lowerCAmelCase : Any = vocab_size __lowerCAmelCase : Tuple = action_weight __lowerCAmelCase : Tuple = reward_weight __lowerCAmelCase : Union[str, Any] = value_weight __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : str = block_size __lowerCAmelCase : Optional[Any] = action_dim __lowerCAmelCase : Union[str, Any] = observation_dim __lowerCAmelCase : Union[str, Any] = transition_dim __lowerCAmelCase : Dict = learning_rate __lowerCAmelCase : Any = n_layer __lowerCAmelCase : Any = n_head __lowerCAmelCase : Optional[int] = n_embd __lowerCAmelCase : str = embd_pdrop __lowerCAmelCase : Dict = attn_pdrop __lowerCAmelCase : Optional[int] = resid_pdrop __lowerCAmelCase : Union[str, Any] = initializer_range __lowerCAmelCase : Optional[int] = layer_norm_eps __lowerCAmelCase : Any = kaiming_initializer_range __lowerCAmelCase : List[str] = use_cache super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
275
def euclidean_distance_sqr(pointa, pointa2):
    """Squared Euclidean distance between two 2-D points."""
    return (pointa[0] - pointa2[0]) ** 2 + (pointa[1] - pointa2[1]) ** 2


def column_based_sort(array, column=0):
    """Return `array` sorted by the given coordinate column (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) scan: smallest squared distance among the first
    `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Smallest squared distance inside the dividing strip; with the strip sorted
    on y, each point only needs to be compared with its 6 predecessors."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer: smallest squared distance among `points_counts` points."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within `closest_pair_dis` of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the Euclidean distance between the two closest of `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
275
1
import os
import sys
import unittest

# Make `utils/get_test_info.py` importable from the repository root.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class __lowercase(unittest.TestCase):
    """Checks the mappings extracted by `utils/get_test_info.py` against
    known-good expectations for BERT and BLIP test files."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
275
def _lowercase ( lowercase__ = 2_0_0 ): __lowerCAmelCase : Union[str, Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0] __lowerCAmelCase : Dict = [0] * (pence + 1) __lowerCAmelCase : Optional[int] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowercase__ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 7_3682
275
1
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (minutes) from one workflow job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Fetch timing info for every job of a GitHub Actions workflow run.

    Pages through the GitHub REST API (100 jobs per page). Returns a mapping
    of job name -> timing dict; on any error, logs and returns ``{}``.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # first page already fetched; compute how many more pages remain
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # sort jobs by duration, longest first
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
275
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test checkpoints."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the full consistency-model checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
275
1
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image):
    """Convert image(s) to a (N, C, H, W) float tensor scaled to [-1, 1]."""
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,  # NOTE(review): original warning category was mangled; FutureWarning matches the deprecation intent
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask):
    """Convert mask(s) to a binary (N, 1, H, W) float tensor (0 = inpaint, 1 = keep)."""
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize at 0.5
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class __lowercase(DiffusionPipeline):
    """RePaint inpainting pipeline (https://arxiv.org/abs/2201.09865)."""

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Inpaint the masked region of `image` using the RePaint resampling schedule."""
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
275
from collections import deque from .hash_table import HashTable class __lowercase (_UpperCAmelCase ): def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' super().__init__(*A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Dict = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(A_ ) __lowerCAmelCase : int = self.values[key] def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' return ( sum(self.charge_factor - len(A_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def UpperCamelCase__ ( self , A_ , A_=None ) ->str: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(A_ ) == 0 ): return key return super()._collision_resolution(A_ , A_ )
275
1
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration combining Pix2Struct text and vision sub-configs."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # keep sub-configs' initializer range in sync with the composite config
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Optional[Any] = global_rng __lowerCAmelCase : Tuple = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = parent __lowerCAmelCase : Optional[int] = batch_size __lowerCAmelCase : Any = min_seq_length __lowerCAmelCase : Tuple = max_seq_length __lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Dict = feature_size __lowerCAmelCase : Optional[int] = padding_value __lowerCAmelCase : Tuple = sampling_rate __lowerCAmelCase : Union[str, Any] = return_attention_mask __lowerCAmelCase : Dict = do_normalize def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Union[str, Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __lowerCAmelCase : Tuple = [ _flatten(floats_list((x, self.feature_size) ) ) for x in 
range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Tuple = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WavaVecaFeatureExtractor def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self ) def UpperCamelCase__ ( self , A_ ) ->Optional[Any]: '''simple docstring''' self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input __lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : List[Any] = np.asarray(A_ ) __lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : str = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 ) __lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths] __lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad'''] __lowerCAmelCase : List[str] = [None, 1600, None] for max_length, padding in zip(A_ , A_ ): __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ ) __lowerCAmelCase : Union[str, Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) 
self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : List[str] = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' ) __lowerCAmelCase : int = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : int = feat_extract( A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : Optional[Any] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) __lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Optional[int] = feat_extract( A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' ) __lowerCAmelCase : List[str] = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to 
longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' import torch __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa ) __lowerCAmelCase : List[Any] = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def UpperCamelCase__ ( self ) ->int: '''simple docstring''' for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: __lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ ) __lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
275
1
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __lowercase : def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = parent __lowerCAmelCase : Optional[Any] = batch_size __lowerCAmelCase : Any = seq_length __lowerCAmelCase : Dict = is_training __lowerCAmelCase : Optional[int] = use_input_mask __lowerCAmelCase : Optional[Any] = use_token_type_ids __lowerCAmelCase : Optional[Any] = use_labels __lowerCAmelCase : List[str] = vocab_size __lowerCAmelCase : List[Any] = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : List[str] = num_attention_heads __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : Optional[Any] = hidden_act __lowerCAmelCase : Union[str, Any] = hidden_dropout_prob __lowerCAmelCase : int = attention_probs_dropout_prob __lowerCAmelCase : Tuple = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : Any = initializer_range __lowerCAmelCase : Any = num_labels __lowerCAmelCase : Union[str, Any] = num_choices __lowerCAmelCase : Any = scope def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Tuple = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Tuple = None if self.use_input_mask: __lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : str = None __lowerCAmelCase : List[str] = None __lowerCAmelCase : Dict = None __lowerCAmelCase : Optional[int] = None if self.use_labels: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = FalconModel(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ ) __lowerCAmelCase : List[str] = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Dict: '''simple docstring''' __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = FalconModel(A_ ) 
model.to(A_ ) model.eval() __lowerCAmelCase : str = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) __lowerCAmelCase : Tuple = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) __lowerCAmelCase : int = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Any: '''simple docstring''' __lowerCAmelCase : List[str] = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Any = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Any: '''simple docstring''' __lowerCAmelCase : List[str] = True __lowerCAmelCase : Any = True __lowerCAmelCase : Any = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass __lowerCAmelCase : List[Any] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) __lowerCAmelCase : int = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __lowerCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) __lowerCAmelCase : Optional[int] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0] __lowerCAmelCase : List[Any] = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0] # 
select random slice __lowerCAmelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Tuple = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : str = config_and_inputs __lowerCAmelCase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else () _UpperCamelCase = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = FalconModelTester(self ) __lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) 
->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase, *__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __lowerCAmelCase : int = alibi self.model_tester.create_and_check_model(A_ , *A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Dict = 3 __lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] __lowerCAmelCase : str = input_ids.ne(1 ).to(A_ ) __lowerCAmelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowerCAmelCase : Tuple = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Union[str, Any] = 3 __lowerCAmelCase : Optional[Any] = '''single_label_classification''' __lowerCAmelCase : Optional[int] = input_dict['''input_ids'''] __lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A_ ) __lowerCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowerCAmelCase : int = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase, 
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : List[Any] = input_dict['''input_ids'''] __lowerCAmelCase : List[str] = FalconForCausalLM(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Tuple = model(A_ , use_cache=A_ ) __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Optional[int] = model._convert_to_rw_cache(result.past_key_values ) __lowerCAmelCase : Optional[Any] = model._convert_cache_to_standard_format(A_ , A_ ) for layer in range(len(A_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase, __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : str = 3 __lowerCAmelCase : Union[str, Any] = '''multi_label_classification''' __lowerCAmelCase : List[Any] = input_dict['''input_ids'''] __lowerCAmelCase : Tuple = input_ids.ne(1 ).to(A_ ) __lowerCAmelCase : Optional[int] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __lowerCAmelCase : Optional[int] = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Dict = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' for model_class in self.all_generative_model_classes: __lowerCAmelCase, __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(A_ , '''use_cache''' ): return __lowerCAmelCase : Optional[int] = model_class(A_ ).to(A_ ) if "use_cache" not in inputs: 
__lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : Tuple = model(**A_ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __lowerCAmelCase : Union[str, Any] = ( getattr(A_ , '''decoder_layers''' , A_ ) or getattr(A_ , '''num_decoder_layers''' , A_ ) or config.num_hidden_layers ) __lowerCAmelCase : List[str] = getattr(A_ , '''num_kv_heads''' , config.num_attention_heads ) __lowerCAmelCase : Optional[int] = getattr(A_ , '''d_model''' , config.hidden_size ) __lowerCAmelCase : List[str] = embed_dim // num_attention_heads __lowerCAmelCase : List[Any] = outputs['''past_key_values'''] self.assertEqual(len(A_ ) , A_ ) __lowerCAmelCase, __lowerCAmelCase : List[str] = inputs['''input_ids'''].shape for i in range(A_ ): if config.new_decoder_architecture: __lowerCAmelCase : List[Any] = config.num_attention_heads elif config.multi_query: __lowerCAmelCase : int = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __lowercase (unittest.TestCase ): @slow def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : str = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) __lowerCAmelCase : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(A_ ) __lowerCAmelCase : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) __lowerCAmelCase : Union[str, Any] = ( '''My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.''' ) __lowerCAmelCase : Tuple = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 ) __lowerCAmelCase : Optional[int] = tokenizer.batch_decode(A_ )[0] self.assertEqual(A_ , A_ ) @slow def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ ) __lowerCAmelCase : Optional[Any] = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(A_ ) __lowerCAmelCase : Any = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , num_beams=2 , max_new_tokens=4 ) @slow def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ ) __lowerCAmelCase : Tuple = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(device=A_ ) __lowerCAmelCase : Dict = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) # Test results are the same with and without cache __lowerCAmelCase : Dict = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) __lowerCAmelCase : List[str] = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
275
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    """Builds a tiny random DeBERTa config plus matching inputs and shape-checks
    every task head.

    The obfuscated original gave all three classes in this module the same name
    and all methods the same name, so later definitions silently overwrote
    earlier ones; names are restored here to match the call sites below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized from the tester fields."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocabulary than the shape tests use.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # The loss must be a scalar (empty shape).
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the obfuscated source collapsed these five flags onto one
    # name (values True, False, False, False, False); attribute names below are
    # reconstructed from the upstream test file — confirm against ModelTesterMixin.
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
275
1
"""Convert ViT-MSN self-supervised checkpoints to the Hugging Face ViTMSNModel format."""

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    """Return (old_key, new_key) pairs mapping the MSN checkpoint layout to HF naming."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection into separate query/key/value tensors in-place."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict.
        # NOTE(review): the obfuscated source erased these assignment targets; the
        # key names below are reconstructed from the upstream conversion script.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the supervised classification head (unused by the base model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """Drop the MSN projection head: it is only used during self-supervised
    pre-training and is not needed for downstream tasks."""
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an MSN checkpoint, remap it into a ViTMSNModel, verify a known
    output slice, and save model + image processor to ``pytorch_dump_folder_path``."""
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    # Architecture hyper-parameters are encoded in the checkpoint file name.
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    # The MSN checkpoint stores the weights under "target_encoder".
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
275
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct dense (14-slot) <-> sparse (37-slot) atom index maps and masks.

    Adds per-residue gather indices and existence masks to ``protein``, keyed off
    its ``"aatype"`` tensor, and returns the same dict.

    NOTE(review): the obfuscated source collapsed the three per-restype lists onto
    one name and erased the dict-write targets; the distinct names and the output
    keys below are reconstructed from the upstream OpenFold ``make_atom14_masks``.
    """
    restype_atomaa_to_atomaa_list = []  # per restype: dense slot -> sparse atom index
    restype_atomaa_to_atomaa_rev_list = []  # per restype: sparse atom -> dense slot index
    restype_atomaa_mask_list = []  # per restype: which dense slots actually exist

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])

        atom_name_to_idxaa = {name: i for i, name in enumerate(atom_names)}
        restype_atomaa_to_atomaa_rev_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]
        )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_rev_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atomaa_to_atomaa = torch.tensor(restype_atomaa_to_atomaa_list, dtype=torch.int32, device=device)
    restype_atomaa_to_atomaa_rev = torch.tensor(restype_atomaa_to_atomaa_rev_list, dtype=torch.int32, device=device)
    restype_atomaa_mask = torch.tensor(restype_atomaa_mask_list, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atomaa_to_atomaa = restype_atomaa_to_atomaa[protein_aatype]
    residx_atomaa_mask = restype_atomaa_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atomaa_mask
    protein["residx_atom14_to_atom37"] = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    residx_atomaa_to_atomaa_rev = restype_atomaa_to_atomaa_rev[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atomaa_to_atomaa_rev.long()

    # create the corresponding mask over the sparse (37-slot) representation
    restype_sparse_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_sparse_mask[restype, atom_type] = 1

    residx_sparse_mask = restype_sparse_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_sparse_mask

    return protein


def make_atomaa_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """NumPy front-end for :func:`make_atomaa_masks`: tensorize ndarray leaves,
    compute the masks, then convert every tensor leaf back to ``np.ndarray``."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out


# Backward-compatible binding for the original (obfuscated) module-level name.
_lowercase = make_atomaa_masks_np
1
def binomial_coefficient(n: int, r: int) -> int:
    """Return the binomial coefficient C(n, r).

    Uses the additive Pascal's-triangle recurrence with a single O(r) row,
    so the computation needs O(n * r) time and O(r) space and never forms
    intermediate factorials.

    Returns 0 when r > n. Raises ValueError for negative inputs.
    """
    if n < 0 or r < 0:
        raise ValueError("n and r must be non-negative")
    c = [0 for _ in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row: update right-to-left so
        # c[j - 1] still holds the previous row's value when it is read.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


# Backward-compatible alias for the original (obfuscated) name.
_lowercase = binomial_coefficient

if __name__ == "__main__":
    print(binomial_coefficient(n=10, r=5))
275
def is_isogram(string: str) -> bool:
    """Return True if *string* is an isogram (no letter repeats, case-insensitive).

    An empty string is considered an isogram.

    Raises:
        ValueError: if the string contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    lowered = string.lower()
    # An isogram has exactly as many distinct letters as total letters.
    return len(lowered) == len(set(lowered))


# Backward-compatible alias for the original (obfuscated) name.
_lowercase = is_isogram

if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
1
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    """Builds a tiny random ESMFold config plus inputs and shape-checks the
    structure-prediction outputs.

    Class/method names in the obfuscated source all collapsed to one identifier
    (so later defs silently overwrote earlier ones); they are restored here to
    match the call sites below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # vocab_size is fixed at 33: ESMFold requires the full ESM-2 vocabulary.
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        # positions: (recycles, batch, seq, 14 atoms, xyz); angles: (recycles, batch, seq, 7 torsions, sin/cos)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the obfuscated source collapsed these attribute names onto one
    # identifier; names reconstructed from the upstream test file.
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


# NOTE(review): the original text was mechanically obfuscated — every parameter was
# renamed to `A_` (a duplicate-argument SyntaxError) and every assignment target to
# `__lowerCAmelCase`, while the bodies still *read* the real names
# (`self.num_inference_steps`, `sample_hat`, `KarrasVeOutput`, ...). Names below are
# restored from those surviving references and the public diffusers API.
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output of a scheduler step.

    Attributes:
        prev_sample: Computed sample (x_{t-1}) for the previous timestep;
            used as the next model input in the denoising loop.
        derivative: Derivative of the predicted original sample (x_0).
        pred_original_sample: Optionally, the predicted denoised sample (x_0)
            based on the current model output.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampler in the style of Karras et al. "Elucidating the Design Space
    of Diffusion-Based Generative Models": each iteration adds churn noise
    (`add_noise_to_input`), takes an Euler prediction (`step`) and a second-order
    correction (`step_correct`).
    """

    # Second-order sampler: every prediction step is followed by one correction step.
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ) -> None:
        """
        Args:
            sigma_min: Minimum noise magnitude of the schedule.
            sigma_max: Maximum noise magnitude of the schedule.
            s_noise: Amount of additional noise to counteract detail loss during sampling.
            s_churn: Controls the overall amount of stochasticity (churn).
            s_min: Lower bound of the sigma range in which churn is applied.
            s_max: Upper bound of the sigma range in which churn is applied.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (populated by set_timesteps)
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op scaling hook kept for API interchangeability between schedulers."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Build the (descending) discrete timesteps and the geometric sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # geometric interpolation from sigma_max down to sigma_min
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """
        Explicit Langevin-like "churn": raise the noise level from sigma to
        sigma_hat = sigma + gamma * sigma and add matching Gaussian noise.

        Returns:
            (sample_hat, sigma_hat): the noisier sample and its noise level.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """
        Euler prediction step: propagate the sample from sigma_hat to sigma_prev.

        Returns a `KarrasVeOutput` (or a `(prev_sample, derivative)` tuple when
        `return_dict=False`).
        """
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """
        Second-order (Heun) correction: re-evaluate the derivative at the predicted
        point and average it with the first derivative.
        """
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        """Forward diffusion is not defined for this sampler."""
        raise NotImplementedError()
275
1
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# NOTE(review): the original text was mechanically obfuscated — all parameters were
# renamed to `lowercase__` (duplicate-argument SyntaxError) and all assignment
# targets to `__lowerCAmelCase` while call sites kept the real function names
# (`create_rename_keys`, `rename_key`, `read_in_q_k_v`, ...). Names below are
# restored from those call sites and the transformers ViT API.
def create_rename_keys(config, base_model=False):
    """Build (timm_name, hf_name) rename pairs for a ViT checkpoint.

    Args:
        config: `ViTConfig` of the target model (only `num_hidden_layers` is read).
        base_model: If True, target a bare `ViTModel` (no "vit." prefix, with pooler).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value tensors (in place)."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): destination key names restored from the upstream conversion
        # script — the obfuscated source lost the left-hand sides; verify before use.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the timm classification head (trailing underscore: mutates `state_dict` in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the entry stored under `old` to `new` in `dct`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Convert a timm ViT/DeiT checkpoint named `vit_name` into the HF transformers
    format, verify the outputs against the timm model, and save the result to
    `pytorch_dump_folder_path`.
    """
    config = ViTConfig()
    base_model = False

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # fix: key on each entry's own `k`, not on a single stale variable
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])

    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            # deit-base uses the ViTConfig defaults
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_attention_heads = 8
            config.num_hidden_layers = 8
        elif vit_name[4:].startswith("base"):
            # vit-base uses the ViTConfig defaults
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
275
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


# NOTE(review): the original text was mechanically obfuscated — both functions were
# renamed to `_lowercase`, parameters to duplicate `lowercase__` (SyntaxError), and
# assignment targets to `__lowerCAmelCase` while `out_tensor`/`padding_tensor` were
# still read by their real names. Names below are restored from those references.
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to `sequence_length`.

    Args:
        sequences: Iterable of per-example sequences (each truncated to `sequence_length`).
        padding_value: Fill value; a 2-tuple selects a (batch, seq, 2) output, a scalar a (batch, seq) one.
        padding_side: "right" pads at the end, anything else pads at the start.
        sequence_length: Target length of the padded axis.

    Returns:
        The padded batch as nested Python lists (via `ndarray.tolist()`).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding — TODO(review): the `- 1` offset is reproduced from the
            # upstream LUKE example; confirm it matches the intended alignment.
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    """Return True if `char` is punctuation (ASCII ranges or Unicode category "P*")."""
    cp = ord(char)
    # ASCII punctuation is treated as such even where Unicode disagrees (e.g. "^", "$").
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs, labels, NER tags and entity
    spans of a LUKE entity-aware token-classification batch.

    Attributes:
        tokenizer: Tokenizer used for padding the encoded inputs.
        padding: Padding strategy passed through to `tokenizer.pad`.
        max_length: Optional maximum length for `tokenizer.pad`.
        pad_to_multiple_of: Optional multiple for padded lengths.
        label_pad_token_id: Fill value for label padding (-100 is ignored by the loss).
        return_tensors: Framework tag consumed by `DataCollatorMixin` dispatch.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """Collate `features` into a padded batch of `torch.int64` tensors."""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors will fail with ragged labels, so defer it
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
275
1
# Unit + slow integration tests for diffusers' ConsistencyModelPipeline.
# NOTE(review): this module is mechanically obfuscated — all test methods share the
# name `UpperCamelCase__` (later definitions clobber earlier ones, so only the last
# per class would run), several signatures repeat the parameter `A_` (a
# duplicate-argument SyntaxError), and locals are assigned to `__lowerCAmelCase`
# but read under their real names (`pipe`, `image`, `generator`, ...), which would
# raise NameError if executed. Code tokens are left untouched below.
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# Fast CPU tests run against tiny dummy UNets via the common pipeline tester mixin.
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase = ConsistencyModelPipeline
    _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    _UpperCamelCase = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    @property
    def UpperCamelCase__ ( self ) ->List[str]:
        '''Tiny unconditional dummy UNet fetched from the Hub for fast tests.'''
        __lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
        return unet

    @property
    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Tiny class-conditional dummy UNet fetched from the Hub for fast tests.'''
        __lowerCAmelCase : List[str] = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
        return unet

    def UpperCamelCase__ ( self , A_=False ) ->Dict:
        '''Assemble the pipeline components (unet + CM multistep scheduler).'''
        if class_cond:
            __lowerCAmelCase : List[str] = self.dummy_cond_unet
        else:
            __lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : Dict = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components

    def UpperCamelCase__ ( self , A_ , A_=0 ) ->Tuple:
        '''Build deterministic call kwargs for the pipeline (seeded generator).'''
        if str(A_ ).startswith('''mps''' ):
            # MPS does not support device-bound generators
            __lowerCAmelCase : str = torch.manual_seed(A_ )
        else:
            __lowerCAmelCase : Dict = torch.Generator(device=A_ ).manual_seed(A_ )
        __lowerCAmelCase : Tuple = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        return inputs

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Multistep sampling (unconditional): check image shape and a pixel slice.'''
        __lowerCAmelCase : Union[str, Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Tuple = self.get_dummy_components()
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : str = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Multistep sampling (class-conditional): same reference slice as above.'''
        __lowerCAmelCase : List[Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : str = self.get_dummy_components(class_cond=A_ )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : List[Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : Optional[int] = 0
        __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[str] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Onestep sampling (unconditional): num_inference_steps=1, timesteps=None.'''
        __lowerCAmelCase : Any = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
        __lowerCAmelCase : List[Any] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : int = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Tuple = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : Any = 1
        __lowerCAmelCase : List[Any] = None
        __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : Any = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[Any] = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Onestep sampling (class-conditional): class label 0.'''
        __lowerCAmelCase : List[Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Optional[Any] = self.get_dummy_components(class_cond=A_ )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : Union[str, Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : List[str] = 1
        __lowerCAmelCase : Dict = None
        __lowerCAmelCase : Tuple = 0
        __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3


# Slow GPU integration tests against the real cd_imagenet64_l2 checkpoint.
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def UpperCamelCase__ ( self ) ->int:
        '''Free CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->str:
        '''Build call kwargs; optionally pin fixed latents for fp16 reproducibility.'''
        __lowerCAmelCase : Dict = torch.manual_seed(A_ )
        __lowerCAmelCase : Tuple = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            __lowerCAmelCase : List[str] = self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ )
            __lowerCAmelCase : Union[str, Any] = latents
        return inputs

    def UpperCamelCase__ ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->Optional[int]:
        '''Generate a deterministic latent tensor for a given seed/device/dtype/shape.'''
        if type(A_ ) == str:
            __lowerCAmelCase : int = torch.device(A_ )
        __lowerCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        __lowerCAmelCase : Union[str, Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
        return latents

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Full-checkpoint multistep sampling: compare a pixel slice to reference values.'''
        __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : str = self.get_inputs()
        __lowerCAmelCase : Any = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[int] = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def UpperCamelCase__ ( self ) ->Any:
        '''Full-checkpoint onestep sampling.'''
        __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : List[Any] = self.get_inputs()
        __lowerCAmelCase : Tuple = 1
        __lowerCAmelCase : Optional[Any] = None
        __lowerCAmelCase : str = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Multistep sampling in fp16 with fixed latents under the SDP flash-attention kernel.'''
        __lowerCAmelCase : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=A_ , device=A_ )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
            __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[int] = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def UpperCamelCase__ ( self ) ->List[str]:
        '''Onestep sampling in fp16 with fixed latents under the SDP flash-attention kernel.'''
        __lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Union[str, Any] = self.get_inputs(get_fixed_latents=A_ , device=A_ )
        __lowerCAmelCase : Any = 1
        __lowerCAmelCase : int = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
            __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCAmelCase : str = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Any = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
275
# NOTE(review): machine-obfuscated copy of the MgpstrProcessor unit tests.
# Locals are all assigned to `__lowerCAmelCase` but later referenced via the
# original names / `A_`, so this module is not runnable as-is — comments below
# document the apparent intent; verify against the upstream transformers test.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
    """Tests for the MGP-STR processor (character tokenizer + ViT image processor)."""

    # Image-processor class under test; None when vision deps are unavailable.
    _UpperCamelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def UpperCamelCase__ ( self ) ->str:
        """Return the image-processor kwargs prepared by the tester helper."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self ) ->str:
        """Create a temp dir holding a character vocab file and an image-processor config (setUp)."""
        __lowerCAmelCase : Tuple = (3, 32, 128)
        __lowerCAmelCase : List[str] = tempfile.mkdtemp()

        # fmt: off
        __lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        __lowerCAmelCase : Optional[int] = dict(zip(A_ , range(len(A_ ) ) ) )

        __lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A_ ) + '''\n''' )

        __lowerCAmelCase : Union[str, Any] = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(A_ , A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        """Load the char tokenizer saved in the temp dir."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        """Load the image processor saved in the temp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Remove the temp dir (tearDown)."""
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        """Return a single random PIL image fixture (HWC, uint8)."""
        # `np.uinta` is the obfuscated form of np.uint8 — TODO confirm against upstream.
        __lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )

        __lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) )

        return image_input

    def UpperCamelCase__ ( self ) ->Any:
        """Round-trip the processor through save_pretrained/from_pretrained with defaults."""
        __lowerCAmelCase : Dict = self.get_tokenizer()
        __lowerCAmelCase : List[Any] = self.get_image_processor()

        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Round-trip with extra kwargs overriding special tokens / normalization."""
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : Union[str, Any] = self.get_image_processor()

        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )

        __lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )

        __lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->List[str]:
        """Processor(images=...) must match the bare image processor's output."""
        __lowerCAmelCase : Any = self.get_image_processor()
        __lowerCAmelCase : Optional[Any] = self.get_tokenizer()

        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Optional[int] = self.prepare_image_inputs()

        __lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
        __lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase__ ( self ) ->str:
        """Processor(text=...) must match the bare tokenizer's output."""
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Union[str, Any] = self.get_tokenizer()

        __lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Any = '''test'''

        __lowerCAmelCase : Dict = processor(text=A_ )

        __lowerCAmelCase : str = tokenizer(A_ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Joint text+image call returns pixel_values and labels; no input raises."""
        __lowerCAmelCase : Dict = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()

        __lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[Any] = '''test'''
        __lowerCAmelCase : int = self.prepare_image_inputs()

        __lowerCAmelCase : int = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )

        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """char_decode must equal batch_decode with spaces stripped."""
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : int = self.get_tokenizer()

        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        __lowerCAmelCase : Optional[int] = processor.char_decode(A_ )
        __lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ )
        __lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]

        self.assertListEqual(A_ , A_ )

    def UpperCamelCase__ ( self ) ->Any:
        """With text=None, output keys must equal processor.model_input_names."""
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()

        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()

        __lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

    def UpperCamelCase__ ( self ) ->List[str]:
        """batch_decode on (char, bpe, wp) logits returns the full result dict."""
        __lowerCAmelCase : List[str] = self.get_image_processor()
        __lowerCAmelCase : List[str] = self.get_tokenizer()

        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
        __lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 )
        __lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 )

        __lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )

        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
275
1
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): obfuscated copy of `accelerate`'s `env` CLI command. Locals
# are assigned to `__lowerCAmelCase` but referenced by their original names
# (`parser`, `args`, `info`, ...), so the module is not runnable as-is —
# verify against the upstream accelerate source.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def _lowercase ( lowercase__=None ):
    """Build (or attach to *subparsers*) the argparse parser for `accelerate env`."""
    if subparsers is not None:
        __lowerCAmelCase : List[str] = subparsers.add_parser('''env''' )
    else:
        __lowerCAmelCase : List[str] = argparse.ArgumentParser('''Accelerate env command''' )

    parser.add_argument(
        '''--config_file''' , default=lowercase__ , help='''The config file to use for the default values in the launching script.''' )

    if subparsers is not None:
        # Route `accelerate env` to the env_command handler.
        parser.set_defaults(func=lowercase__ )
    return parser


def _lowercase ( lowercase__ ):
    """Collect and print environment info (versions, accelerators, RAM, config)."""
    __lowerCAmelCase : Dict = torch.__version__
    __lowerCAmelCase : Dict = torch.cuda.is_available()
    __lowerCAmelCase : Union[str, Any] = is_xpu_available()
    __lowerCAmelCase : int = is_npu_available()

    __lowerCAmelCase : Tuple = '''Not found'''
    # Get the default from the config file.
    # NOTE(review): upstream checks os.path.isfile(default_config_file) here;
    # the obfuscated argument usage below looks wrong — confirm before relying on it.
    if args.config_file is not None or os.path.isfile(lowercase__ ):
        __lowerCAmelCase : List[Any] = load_config_from_file(args.config_file ).to_dict()

    __lowerCAmelCase : Optional[Any] = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
        '''PyTorch XPU available''': str(lowercase__ ),
        '''PyTorch NPU available''': str(lowercase__ ),
        '''System RAM''': f"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        __lowerCAmelCase : List[str] = torch.cuda.get_device_name()

    print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
    print('''\n'''.join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )

    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    __lowerCAmelCase : Optional[int] = (
        '''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(lowercase__ , lowercase__ )
        else f"""\t{accelerate_config}"""
    )
    print(lowercase__ )

    __lowerCAmelCase : Union[str, Any] = accelerate_config
    return info


def _lowercase ( ):
    """CLI entry point: parse args and run the env command."""
    __lowerCAmelCase : List[str] = env_command_parser()
    __lowerCAmelCase : Optional[int] = parser.parse_args()
    env_command(lowercase__ )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
275
# NOTE(review): obfuscated copy of the diffusers PNDM pipeline tests. Locals
# are assigned to `__lowerCAmelCase` but referenced via original names /
# `A_`, so the module is not runnable as-is — verify against upstream.
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class __lowercase (unittest.TestCase ):
    """Fast CPU smoke tests for the PNDM pipeline with a tiny UNet."""

    @property
    def UpperCamelCase__ ( self ) ->Tuple:
        """Build a small, seeded UNet2D model for deterministic testing."""
        torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def UpperCamelCase__ ( self ) ->int:
        """Run 20 inference steps twice (dict and tuple return) and pin a slice."""
        __lowerCAmelCase : List[str] = self.dummy_uncond_unet
        __lowerCAmelCase : Any = PNDMScheduler()

        __lowerCAmelCase : Dict = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' ).images

        __lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        __lowerCAmelCase : List[Any] = pndm(generator=A_ , num_inference_steps=20 , output_type='''numpy''' , return_dict=A_ )[0]

        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __lowercase (unittest.TestCase ):
    """Slow integration test against the pretrained google/ddpm-cifar10-32 UNet."""

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        """Generate one CIFAR-sized image and compare a pinned pixel slice."""
        __lowerCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(A_ )
        __lowerCAmelCase : int = PNDMScheduler()

        __lowerCAmelCase : Any = PNDMPipeline(unet=A_ , scheduler=A_ )
        pndm.to(A_ )
        pndm.set_progress_bar_config(disable=A_ )
        __lowerCAmelCase : Tuple = torch.manual_seed(0 )
        __lowerCAmelCase : Any = pndm(generator=A_ , output_type='''numpy''' ).images

        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
275
1
from statistics import mean, stdev def _lowercase ( lowercase__ , lowercase__ = 3 ): __lowerCAmelCase : Optional[int] = min(lowercase__ ) __lowerCAmelCase : List[Any] = max(lowercase__ ) # normalize data return [round((x - x_min) / (x_max - x_min) , lowercase__ ) for x in data] def _lowercase ( lowercase__ , lowercase__ = 3 ): __lowerCAmelCase : List[str] = mean(lowercase__ ) __lowerCAmelCase : Dict = stdev(lowercase__ ) # standardize data return [round((x - mu) / (sigma) , lowercase__ ) for x in data]
275
# NOTE(review): obfuscated copy of a string-evolution genetic algorithm.
# All module constants are assigned to `_UpperCamelCase` and all locals to
# `__lowerCAmelCase`, while later code references the original names
# (N_POPULATION, parent_a, pop, ...), so this module is not runnable as-is.
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
_UpperCamelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_UpperCamelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_UpperCamelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def _lowercase ( lowercase__ , lowercase__ ):
    """Fitness: count positions where the candidate matches the target string."""
    __lowerCAmelCase : List[str] = len([g for position, g in enumerate(lowercase__ ) if g == main_target[position]] )
    return (item, float(lowercase__ ))


def _lowercase ( lowercase__ , lowercase__ ):
    """Single-point crossover: split both parents at a random slice point."""
    __lowerCAmelCase : str = random.randint(0 , len(lowercase__ ) - 1 )
    __lowerCAmelCase : int = parent_a[:random_slice] + parent_a[random_slice:]
    __lowerCAmelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
    return (child_a, child_a)


def _lowercase ( lowercase__ , lowercase__ ):
    """With probability MUTATION_PROBABILITY, replace one gene with a random one."""
    __lowerCAmelCase : List[str] = list(lowercase__ )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        __lowerCAmelCase : int = random.choice(lowercase__ )
    return "".join(lowercase__ )


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , ):
    """Breed children from a parent, count proportional to its fitness (capped at 10)."""
    __lowerCAmelCase : str = []
    # Generate more children proportionally to the fitness score.
    __lowerCAmelCase : str = int(parent_a[1] * 1_0_0 ) + 1
    __lowerCAmelCase : Optional[Any] = 1_0 if child_n >= 1_0 else child_n
    for _ in range(lowercase__ ):
        __lowerCAmelCase : List[Any] = population_score[random.randint(0 , lowercase__ )][0]

        __lowerCAmelCase, __lowerCAmelCase : Dict = crossover(parent_a[0] , lowercase__ )
        # Append new string to the population list.
        pop.append(mutate(lowercase__ , lowercase__ ) )
        pop.append(mutate(lowercase__ , lowercase__ ) )
    return pop


def _lowercase ( lowercase__ , lowercase__ , lowercase__ = True ):
    """Evolve random strings toward *target*; returns (generation, total, best)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        __lowerCAmelCase : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(lowercase__ )
    # Verify that the target contains no genes besides the ones inside genes variable.
    __lowerCAmelCase : Any = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        __lowerCAmelCase : List[str] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(lowercase__ )

    # Generate random starting population.
    __lowerCAmelCase : List[Any] = []
    for _ in range(lowercase__ ):
        population.append(''''''.join([random.choice(lowercase__ ) for i in range(len(lowercase__ ) )] ) )

    # Just some logs to know what the algorithms is doing.
    __lowerCAmelCase, __lowerCAmelCase : Tuple = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(lowercase__ )

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #        max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        __lowerCAmelCase : Any = [evaluate(lowercase__ , lowercase__ ) for item in population]

        # Check if there is a matching evolution.
        __lowerCAmelCase : Union[str, Any] = sorted(lowercase__ , key=lambda lowercase__ : x[1] , reverse=lowercase__ )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        __lowerCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(lowercase__ )
        # Normalize population score to be between 0 and 1.
        __lowerCAmelCase : List[Any] = [
            (item, score / len(lowercase__ )) for item, score in population_score
        ]

        # This is selection
        for i in range(lowercase__ ):
            population.extend(select(population_score[int(lowercase__ )] , lowercase__ , lowercase__ ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(lowercase__ ) > N_POPULATION:
                break


if __name__ == "__main__":
    _UpperCamelCase = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    _UpperCamelCase = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = basic(target_str, genes_list)
    print(
        F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
275
1
# NOTE(review): obfuscated copy of accelerate's example-script tests. Locals
# are assigned to `__lowerCAmelCase` but referenced by original names / `A_`,
# so this module is not runnable as-is — verify against upstream accelerate.
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Feature scripts excluded from the diff-against-complete-example comparison.
_UpperCamelCase = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class __lowercase (unittest.TestCase ):
    """Checks each by_feature example stays in sync with its complete example."""

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None , A_ = None ) ->int:
        """Diff every non-excluded feature script against the complete example; diff must be empty."""
        __lowerCAmelCase : Dict = None
        __lowerCAmelCase : Any = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        __lowerCAmelCase : Optional[int] = os.path.abspath('''examples''' )
        for item in os.listdir(A_ ):
            if item not in EXCLUDE_EXAMPLES:
                __lowerCAmelCase : int = os.path.join(A_ , A_ )
                if os.path.isfile(A_ ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=A_ , feature_script=A_ , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        __lowerCAmelCase : str = compare_against_test(
                            os.path.join(A_ , A_ ) , A_ , A_ , A_ )
                        __lowerCAmelCase : Union[str, Any] = '''\n'''.join(A_ )
                        if special_strings is not None:
                            # Strip lines that are legitimately allowed to differ.
                            for string in special_strings:
                                __lowerCAmelCase : Any = diff.replace(A_ , '''''' )
                        self.assertEqual(A_ , '''''' )

    def UpperCamelCase__ ( self ) ->int:
        """NLP example: compare both parser-only and training-function sections."""
        self.one_complete_example('''complete_nlp_example.py''' , A_ )
        self.one_complete_example('''complete_nlp_example.py''' , A_ )

    def UpperCamelCase__ ( self ) ->Any:
        """CV example: compare with tracking-specific lines whitelisted."""
        __lowerCAmelCase : Tuple = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        __lowerCAmelCase : List[str] = [
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
            ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
            ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''"epoch": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''' , A_ , A_ , A_ )
        self.one_complete_example('''complete_cv_example.py''' , A_ , A_ , A_ )


@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class __lowercase (_UpperCAmelCase ):
    """Launches the feature example scripts end-to-end in a temp dir."""

    # Keep the shared temp dir across tests (checkpoints are reused).
    _UpperCamelCase = False

    @classmethod
    def UpperCamelCase__ ( cls ) ->Union[str, Any]:
        """Write a default accelerate config and build the shared launch command."""
        super().setUpClass()
        __lowerCAmelCase : Dict = tempfile.mkdtemp()
        __lowerCAmelCase : Tuple = os.path.join(cls._tmpdir , '''default_config.yml''' )

        write_basic_config(save_location=cls.configPath )
        __lowerCAmelCase : Dict = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def UpperCamelCase__ ( cls ) ->List[str]:
        """Remove the shared temp dir."""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def UpperCamelCase__ ( self ) ->Dict:
        """Checkpointing by epoch produces an epoch_0 directory."""
        __lowerCAmelCase : int = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split()

        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Checkpointing every step produces a step_2 directory."""
        __lowerCAmelCase : Optional[Any] = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split()
        __lowerCAmelCase : Dict = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Resuming from epoch_0 skips epoch 0 and runs epoch 1."""
        __lowerCAmelCase : Optional[Any] = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split()
        __lowerCAmelCase : Any = run_command(self._launch_args + testargs , return_stdout=A_ )
        self.assertNotIn('''epoch 0:''' , A_ )
        self.assertIn('''epoch 1:''' , A_ )

    def UpperCamelCase__ ( self ) ->int:
        """Resuming from step_2 behaves per process count (multi-GPU skips epoch 0)."""
        __lowerCAmelCase : Dict = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split()
        __lowerCAmelCase : List[str] = run_command(self._launch_args + testargs , return_stdout=A_ )
        if torch.cuda.is_available():
            __lowerCAmelCase : List[Any] = torch.cuda.device_count()
        else:
            __lowerCAmelCase : Tuple = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , A_ )
            self.assertIn('''epoch 1:''' , A_ )
        else:
            self.assertIn('''epoch 0:''' , A_ )
            self.assertIn('''epoch 1:''' , A_ )

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        """Cross-validation example reaches at least 0.75 accuracy."""
        __lowerCAmelCase : Tuple = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split()
        with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            __lowerCAmelCase : List[Any] = run_command(self._launch_args + testargs , return_stdout=A_ )
            __lowerCAmelCase : Tuple = re.findall('''({.+})''' , A_ )
            __lowerCAmelCase : Tuple = [r for r in results if '''accuracy''' in r][-1]
            __lowerCAmelCase : List[str] = ast.literal_eval(A_ )
            self.assertGreaterEqual(results['''accuracy'''] , 0.75 )

    def UpperCamelCase__ ( self ) ->int:
        """multi_process_metrics example runs to completion."""
        __lowerCAmelCase : Union[str, Any] = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def UpperCamelCase__ ( self ) ->Optional[int]:
        """Tracking example writes a `tracking` directory under project_dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            __lowerCAmelCase : List[Any] = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(A_ , '''tracking''' ) ) )

    def UpperCamelCase__ ( self ) ->Dict:
        """gradient_accumulation example runs to completion."""
        __lowerCAmelCase : Union[str, Any] = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """local_sgd example runs to completion."""
        __lowerCAmelCase : Dict = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs )
275
# NOTE(review): obfuscated copy of the GPT-SW3 SentencePiece tokenizer. Locals
# are assigned to `__lowerCAmelCase` but referenced via original names / `A_`,
# so the module is not runnable as-is. The `whitespaces` set below originally
# contains distinct Unicode space codepoints that render identically — verify
# its exact contents against the upstream transformers source.
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"vocab_file": "spiece.model"}

_UpperCamelCase = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

_UpperCamelCase = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class __lowercase (_UpperCAmelCase ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family."""

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = ["""input_ids""", """attention_mask"""]

    def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None:
        """Load the SentencePiece model and set per-checkpoint special-token defaults."""
        __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs

        __lowerCAmelCase : int = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            __lowerCAmelCase : Union[str, Any] = '''None'''

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token
        __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint has no dedicated pad/bos tokens.
            __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token
            __lowerCAmelCase : int = eos_token if bos_token is None else bos_token
        else:
            __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token
            __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token

        super().__init__(
            do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )

        __lowerCAmelCase : Union[str, Any] = do_lower_case
        __lowerCAmelCase : Union[str, Any] = remove_space
        __lowerCAmelCase : int = keep_accents
        __lowerCAmelCase : Union[str, Any] = vocab_file

        __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A_ )

        # Used for whitespace normalization in input texts
        # fmt : off
        __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''​''', '''„'''}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __lowerCAmelCase : int = re.compile(
            f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )

    def __getstate__( self ) ->Dict:
        """Drop the unpicklable SentencePiece processor before pickling."""
        __lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
        __lowerCAmelCase : List[Any] = None
        return state

    def __setstate__( self , A_ ) ->Tuple:
        """Restore state and reload the SentencePiece processor from the vocab file."""
        __lowerCAmelCase : int = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            __lowerCAmelCase : List[Any] = {}

        __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCamelCase__ ( self ) ->int:
        """Number of pieces in the SentencePiece model."""
        return len(self.sp_model )

    def UpperCamelCase__ ( self , A_ ) ->str:
        """Strip non-printing chars, normalize whitespace variants, apply NFC."""
        __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ )

        # Normalize whitespaces
        __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )

        # NFC Unicode normalization
        __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ )
        return text

    def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]:
        """Tokenize preprocessed text into SentencePiece pieces."""
        __lowerCAmelCase : int = self.preprocess_text(A_ )
        return self.sp_model.encode(A_ , out_type=A_ )

    def UpperCamelCase__ ( self , A_ ) ->int:
        """Convert a piece (token string) to its vocab id."""
        return self.sp_model.PieceToId(A_ )

    def UpperCamelCase__ ( self , A_ ) ->str:
        """Convert a vocab id back to its piece (token string)."""
        return self.sp_model.IdToPiece(A_ )

    @staticmethod
    def UpperCamelCase__ ( A_ ) ->str:
        """Identity pass-through used by the base class's string cleanup hook."""
        return out_string

    def UpperCamelCase__ ( self , A_ ) ->str:
        """Join pieces into a string, decoding around special tokens verbatim."""
        __lowerCAmelCase : str = []
        __lowerCAmelCase : Tuple = ''''''
        __lowerCAmelCase : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(A_ ) + token
                __lowerCAmelCase : Optional[Any] = True
                __lowerCAmelCase : Optional[int] = []
            else:
                current_sub_tokens.append(A_ )
                __lowerCAmelCase : str = False
        out_string += self.sp_model.decode(A_ )

        return out_string

    def UpperCamelCase__ ( self ) ->Dict[str, int]:
        """Return the full token -> id mapping, including added tokens."""
        __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]:
        """Save (copy or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(A_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __lowerCAmelCase : Any = os.path.join(
            A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A_ , '''wb''' ) as fi:
                __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto()
                fi.write(A_ )

        return (out_vocab_file,)

    def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]:
        """Fast encode for str or list of str; optionally return a torch tensor."""
        if isinstance(A_ , A_ ):
            __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ )
            __lowerCAmelCase : Dict = self.sp_model.encode(A_ )
        else:
            __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text]
            __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ )

        if return_tensors is True or return_tensors == "pt":
            __lowerCAmelCase : Tuple = torch.tensor(A_ )

        return token_ids

    def UpperCamelCase__ ( self , A_ ) ->str:
        """Fast decode: ids straight through the SentencePiece model."""
        return self.sp_model.decode(A_ )

    def UpperCamelCase__ ( self , A_ ) ->List[int]:
        """Serialize a Conversation into the GPT-SW3 chat prompt and encode it."""
        __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        __lowerCAmelCase : Any = (
            f"""{self.eos_token}{self.bos_token}"""
            + f"""{self.bos_token}""".join(A_ )
            + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=A_ )
275
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCamelCase = logging.getLogger(__name__) def _lowercase ( lowercase__ , lowercase__ ): return (preds == labels).mean() @dataclass class __lowercase : _UpperCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __lowercase : _UpperCamelCase = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) _UpperCamelCase = field(metadata={"""help""": """Should contain the data files for the task."""} ) _UpperCamelCase = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _UpperCamelCase = field( default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _lowercase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. 
# We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , lowercase__ ) # Set seed set_seed(training_args.seed ) try: __lowerCAmelCase : int = processors[data_args.task_name]() __lowerCAmelCase : int = processor.get_labels() __lowerCAmelCase : Tuple = len(lowercase__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCAmelCase : Optional[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , ) # Get datasets __lowerCAmelCase : Dict = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowercase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __lowerCAmelCase : List[str] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowercase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowercase__ ) -> Dict: __lowerCAmelCase : int = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(lowercase__ , p.label_ids )} # Data collator __lowerCAmelCase : Any = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowerCAmelCase : Dict = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so 
that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCAmelCase : Tuple = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCAmelCase : Any = trainer.evaluate() __lowerCAmelCase : Any = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(lowercase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , lowercase__ , lowercase__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(lowercase__ ) return results def _lowercase ( lowercase__ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
275
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """table-transformer""" _UpperCamelCase = ["""past_key_values"""] _UpperCamelCase = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , A_=True , A_=None , A_=3 , A_=100 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=2048 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , **A_ , ) ->Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) __lowerCAmelCase : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A_ , A_ ): __lowerCAmelCase : int = backbone_config.get('''model_type''' ) __lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] __lowerCAmelCase : Any = config_class.from_dict(A_ ) # set timm attributes to None __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : List[str] = None, None, None __lowerCAmelCase : Tuple = use_timm_backbone __lowerCAmelCase : Optional[Any] = backbone_config __lowerCAmelCase : List[str] = num_channels __lowerCAmelCase : Tuple = num_queries __lowerCAmelCase : int = d_model __lowerCAmelCase : List[Any] = encoder_ffn_dim __lowerCAmelCase : Optional[int] = encoder_layers __lowerCAmelCase : List[str] = encoder_attention_heads __lowerCAmelCase : str = decoder_ffn_dim __lowerCAmelCase : Union[str, Any] = decoder_layers __lowerCAmelCase : Any = decoder_attention_heads __lowerCAmelCase : Optional[int] = dropout __lowerCAmelCase : Any = attention_dropout __lowerCAmelCase : Tuple = activation_dropout __lowerCAmelCase : Optional[Any] = activation_function __lowerCAmelCase : List[str] = init_std __lowerCAmelCase : Tuple = init_xavier_std __lowerCAmelCase : Any = encoder_layerdrop __lowerCAmelCase : List[Any] = decoder_layerdrop __lowerCAmelCase : Optional[Any] = encoder_layers __lowerCAmelCase : Optional[Any] = auxiliary_loss __lowerCAmelCase : Optional[Any] = position_embedding_type __lowerCAmelCase : Tuple = backbone __lowerCAmelCase : Any = use_pretrained_backbone __lowerCAmelCase : int = dilation # Hungarian matcher __lowerCAmelCase : Dict = class_cost __lowerCAmelCase : List[str] = bbox_cost __lowerCAmelCase : int = giou_cost # Loss coefficients __lowerCAmelCase : Optional[Any] = mask_loss_coefficient __lowerCAmelCase : Tuple = dice_loss_coefficient __lowerCAmelCase : int = bbox_loss_coefficient __lowerCAmelCase : List[Any] = giou_loss_coefficient 
__lowerCAmelCase : int = eos_coefficient super().__init__(is_encoder_decoder=A_ , **A_ ) @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return self.d_model class __lowercase (_UpperCAmelCase ): _UpperCamelCase = version.parse("""1.11""" ) @property def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def UpperCamelCase__ ( self ) ->float: '''simple docstring''' return 1e-5 @property def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return 12
275
1
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"vocab_file": "spiece.model"} _UpperCamelCase = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } _UpperCamelCase = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class __lowercase (_UpperCAmelCase ): _UpperCamelCase = VOCAB_FILES_NAMES _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ) ->None: '''simple docstring''' __lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs __lowerCAmelCase : int = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) __lowerCAmelCase : Union[str, Any] = '''None''' # Default definitions 
for our 2 tokenizer versions, with None-checks to enable proper testing __lowerCAmelCase : str = '''<|endoftext|>''' if eos_token is None else eos_token __lowerCAmelCase : Any = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __lowerCAmelCase : Dict = unk_token if pad_token is None else pad_token __lowerCAmelCase : int = eos_token if bos_token is None else bos_token else: __lowerCAmelCase : Optional[int] = '''<pad>''' if pad_token is None else pad_token __lowerCAmelCase : List[str] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __lowerCAmelCase : Union[str, Any] = do_lower_case __lowerCAmelCase : Union[str, Any] = remove_space __lowerCAmelCase : int = keep_accents __lowerCAmelCase : Union[str, Any] = vocab_file __lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off __lowerCAmelCase : List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __lowerCAmelCase : int = re.compile( f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = self.__dict__.copy() __lowerCAmelCase : List[Any] = None return state def __setstate__( self , A_ ) ->Tuple: '''simple docstring''' __lowerCAmelCase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase : List[Any] = {} __lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCamelCase__ ( self ) ->int: '''simple docstring''' return len(self.sp_model ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : int = self.non_printing_characters_re.sub('''''' , A_ ) # Normalize whitespaces __lowerCAmelCase : List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization __lowerCAmelCase : Tuple = unicodedata.normalize('''NFC''' , A_ ) return text def UpperCamelCase__ ( self , A_ , **A_ ) ->List[str]: '''simple docstring''' __lowerCAmelCase : int = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def UpperCamelCase__ ( self , A_ ) ->int: '''simple docstring''' return self.sp_model.PieceToId(A_ ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def UpperCamelCase__ ( A_ ) ->str: '''simple docstring''' return out_string def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : str = [] __lowerCAmelCase : Tuple = '''''' __lowerCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in 
self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Optional[int] = [] else: current_sub_tokens.append(A_ ) __lowerCAmelCase : str = False out_string += self.sp_model.decode(A_ ) return out_string def UpperCamelCase__ ( self ) ->Dict[str, int]: '''simple docstring''' __lowerCAmelCase : str = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , A_ , A_ = None ) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase : Any = os.path.join( A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , '''wb''' ) as fi: __lowerCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: '''simple docstring''' if isinstance(A_ , A_ ): __lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ ) __lowerCAmelCase : Dict = self.sp_model.encode(A_ ) else: __lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text] __lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": __lowerCAmelCase : Tuple = torch.tensor(A_ ) return token_ids def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' return self.sp_model.decode(A_ ) def UpperCamelCase__ ( self , A_ ) ->List[int]: '''simple 
docstring''' __lowerCAmelCase : int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] __lowerCAmelCase : Any = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_ ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
275
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Any = global_rng __lowerCAmelCase : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : str = min_seq_length __lowerCAmelCase : int = max_seq_length __lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Any = padding_value __lowerCAmelCase : str = sampling_rate __lowerCAmelCase : Optional[Any] = return_attention_mask __lowerCAmelCase : Optional[Any] = do_normalize __lowerCAmelCase : Optional[Any] = feature_size __lowerCAmelCase : Optional[int] = chunk_length __lowerCAmelCase : Optional[Any] = hop_length def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, 
"do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0] check_json_file_has_correct_format(A_ ) __lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ ) __lowerCAmelCase : Dict = feat_extract_first.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters __lowerCAmelCase : Dict = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A_ ) __lowerCAmelCase : List[str] = 
self.feature_extraction_class.from_json_file(A_ ) __lowerCAmelCase : List[str] = feat_extract_first.to_dict() __lowerCAmelCase : Tuple = feat_extract_second.to_dict() __lowerCAmelCase : Any = feat_extract_first.mel_filters __lowerCAmelCase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : Optional[int] = np.asarray(A_ ) __lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] __lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated] __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' import torch __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) __lowerCAmelCase : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , 
split='''validation''' ) # automatic decoding with librispeech __lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on __lowerCAmelCase : int = self._load_datasamples(1 ) __lowerCAmelCase : Any = WhisperFeatureExtractor() __lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = self._load_datasamples(1 )[0] __lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue __lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0] self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
275
1
from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __lowercase : _UpperCamelCase = 42 _UpperCamelCase = None # Automatically constructed _UpperCamelCase = "dict" _UpperCamelCase = None _UpperCamelCase = field(default="""Translation""" , init=_UpperCAmelCase , repr=_UpperCAmelCase ) def __call__( self ) ->List[Any]: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def UpperCamelCase__ ( self ) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __lowercase : _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None # Automatically constructed _UpperCamelCase = "dict" _UpperCamelCase = None _UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=_UpperCAmelCase , repr=_UpperCAmelCase ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Optional[int] = sorted(set(self.languages ) ) if self.languages else None __lowerCAmelCase : Optional[Any] = len(self.languages ) if self.languages else None def __call__( self ) ->Dict: '''simple docstring''' return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def UpperCamelCase__ ( self , A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : Dict = set(self.languages ) if self.languages and set(A_ ) - lang_set: raise ValueError( f"""Some languages in example ({", ".join(sorted(set(A_ ) - lang_set ) )}) are not in valid set ({", ".join(A_ )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. 
__lowerCAmelCase : Optional[int] = [] for lang, text in translation_dict.items(): if isinstance(A_ , A_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __lowerCAmelCase, __lowerCAmelCase : Any = zip(*sorted(A_ ) ) return {"language": languages, "translation": translations} def UpperCamelCase__ ( self ) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
275
# Mangled copy of HF's fairseq->transformers Wav2Vec2 checkpoint conversion
# script. NOTE(review): the obfuscation collapsed distinct module-level names
# (logger, MAPPING, TOP_LEVEL_KEYS, PARAM_MAPPING) into `_UpperCamelCase`,
# every function into `_lowercase`, and locals into `__lowerCAmelCase` /
# `lowercase__` (duplicated), so the file does not run as-is — the bodies
# still reference the original upstream names. Comments below describe the
# evident upstream intent.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConfig,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaForCTC,
    WavaVecaForPreTraining,
    WavaVecaProcessor,
    logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification


logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)

# fairseq state-dict key fragment -> transformers module path ("*" = layer index).
_UpperCamelCase = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# Target modules that live at the top level of the HF model (no "wav2vec2." prefix).
_UpperCamelCase = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def _lowercase ( lowercase__ ):
    # Upstream `read_txt_into_dict`: read a label file and map each line number
    # to the first whitespace-separated token of that line (id -> label).
    __lowerCAmelCase : List[str] = {}
    with open(lowercase__ , '''r''' ) as file:
        for line_number, line in enumerate(lowercase__ ):
            __lowerCAmelCase : Any = line.strip()
            if line:
                __lowerCAmelCase : Dict = line.split()
                __lowerCAmelCase : str = line_number
                __lowerCAmelCase : List[str] = words[0]
                __lowerCAmelCase : Any = value
    return result


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    # Upstream `set_recursively(hf_pointer, key, value, full_name, weight_type)`:
    # walk `key` attribute-by-attribute into the HF model, sanity-check the
    # tensor shape, then assign the fairseq tensor onto the matching
    # weight/weight_g/weight_v/bias/param slot.
    for attribute in key.split('''.''' ):
        __lowerCAmelCase : List[Any] = getattr(lowercase__ , lowercase__ )
    __lowerCAmelCase : Any = None
    # Adapter parameters are looked up via PARAM_MAPPING and tagged "param".
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(lowercase__ ):
            __lowerCAmelCase : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            __lowerCAmelCase : List[Any] = '''param'''
    if weight_type is not None and weight_type != "param":
        __lowerCAmelCase : str = getattr(lowercase__ , lowercase__ ).shape
    elif weight_type is not None and weight_type == "param":
        __lowerCAmelCase : Dict = hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ )
        __lowerCAmelCase : str = shape_pointer.shape

        # let's reduce dimension
        __lowerCAmelCase : Any = value[0]
    else:
        __lowerCAmelCase : str = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        __lowerCAmelCase : Union[str, Any] = value
    elif weight_type == "weight_g":
        __lowerCAmelCase : List[str] = value
    elif weight_type == "weight_v":
        __lowerCAmelCase : int = value
    elif weight_type == "bias":
        __lowerCAmelCase : Union[str, Any] = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            __lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ )
        __lowerCAmelCase : Tuple = value
    else:
        __lowerCAmelCase : Any = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    # Upstream `rename_dict`: flat-dict variant of set_recursively — compute the
    # fully qualified HF key and store the tensor in `hf_dict` instead of
    # assigning onto a model. `lm_head` keeps the full tensor; everything else
    # tagged "param" drops the leading dimension (value[0]).
    __lowerCAmelCase : Any = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(lowercase__ ):
            __lowerCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]]
            __lowerCAmelCase : int = '''param'''
    if weight_type is not None and weight_type != "param":
        __lowerCAmelCase : Tuple = '''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        __lowerCAmelCase : List[str] = '''.'''.join([key, hf_param_name] )
    else:
        __lowerCAmelCase : Optional[int] = key
    __lowerCAmelCase : Union[str, Any] = value if '''lm_head''' in full_key else value[0]


# Adapter-layer parameter names (fairseq) -> HF adapter submodule paths.
_UpperCamelCase = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
    # Upstream `load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None)`:
    # match one fairseq tensor against MAPPING, classify its weight type, and
    # dispatch to rename_dict (dict target) or set_recursively (model target).
    # Returns True when the tensor was consumed.
    __lowerCAmelCase : Any = False
    for key, mapped_key in MAPPING.items():
        __lowerCAmelCase : Tuple = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            __lowerCAmelCase : Optional[Any] = True
            if "*" in mapped_key:
                # Layer index sits second-to-last in the fairseq key; splice it in.
                __lowerCAmelCase : List[str] = name.split(lowercase__ )[0].split('''.''' )[-2]
                __lowerCAmelCase : Dict = mapped_key.replace('''*''' , lowercase__ )
            if "weight_g" in name:
                __lowerCAmelCase : List[Any] = '''weight_g'''
            elif "weight_v" in name:
                __lowerCAmelCase : List[Any] = '''weight_v'''
            elif "bias" in name:
                __lowerCAmelCase : Any = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                __lowerCAmelCase : int = '''weight'''
            else:
                __lowerCAmelCase : Any = None
            if hf_dict is not None:
                rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
            else:
                set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
            return is_used
    return is_used


def _lowercase ( lowercase__ , lowercase__ , lowercase__ ):
    # Upstream `recursively_load_weights(fairseq_model, hf_model, is_headless)`:
    # iterate the fairseq state dict, routing conv-feature-extractor tensors to
    # load_conv_layer and the rest to load_wav2vec2_layer; warn on leftovers.
    __lowerCAmelCase : List[str] = []
    __lowerCAmelCase : Optional[Any] = fairseq_model.state_dict()

    __lowerCAmelCase : Tuple = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        __lowerCAmelCase : Any = False
        if "conv_layers" in name:
            load_conv_layer(
                lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , )
            __lowerCAmelCase : int = True
        else:
            __lowerCAmelCase : Dict = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ )
        if not is_used:
            unused_weights.append(lowercase__ )

    logger.warning(f"""Unused weights: {unused_weights}""" )


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    # Upstream `load_conv_layer(full_name, value, feature_extractor,
    # unused_weights, use_group_norm)`: copy a conv/layer-norm tensor of the
    # feature extractor after checking its shape; type_id 0 = conv, 2 = norm.
    __lowerCAmelCase : Any = full_name.split('''conv_layers.''' )[-1]
    __lowerCAmelCase : List[str] = name.split('''.''' )
    __lowerCAmelCase : Any = int(items[0] )
    __lowerCAmelCase : str = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            __lowerCAmelCase : List[str] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            __lowerCAmelCase : List[str] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # With group norm only layer 0 carries a norm; with layer norm every layer does.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            __lowerCAmelCase : List[str] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            __lowerCAmelCase : Optional[int] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowercase__ )


@torch.no_grad()
def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__=False ):
    # Upstream `convert_wav2vec2_checkpoint(checkpoint_path,
    # pytorch_dump_folder_path, config_path=None, dict_path=None,
    # is_finetuned=True, is_seq_class=False)`: build the matching HF model
    # (seq-class head / CTC head + tokenizer + processor / pretraining model),
    # load the fairseq checkpoint, copy the weights, and save the HF model.
    if config_path is not None:
        __lowerCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(lowercase__ )
    else:
        __lowerCAmelCase : Optional[int] = WavaVecaConfig()

    if is_seq_class:
        __lowerCAmelCase : Optional[Any] = read_txt_into_dict(lowercase__ )
        __lowerCAmelCase : int = idalabel
        __lowerCAmelCase : Optional[int] = WavaVecaForSequenceClassification(lowercase__ )
        __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , )
        feature_extractor.save_pretrained(lowercase__ )
    elif is_finetuned:
        if dict_path:
            __lowerCAmelCase : List[str] = Dictionary.load(lowercase__ )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowerCAmelCase : List[Any] = target_dict.pad_index
            __lowerCAmelCase : List[Any] = target_dict.bos_index
            __lowerCAmelCase : Optional[int] = target_dict.eos_index
            __lowerCAmelCase : Any = len(target_dict.symbols )
            __lowerCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''vocab.json''' )
            if not os.path.isdir(lowercase__ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) )
                return
            os.makedirs(lowercase__ , exist_ok=lowercase__ )
            __lowerCAmelCase : Optional[int] = target_dict.indices

            # fairseq has the <pad> and <s> switched
            __lowerCAmelCase : List[str] = 0
            __lowerCAmelCase : int = 1
            with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(lowercase__ , lowercase__ )
            __lowerCAmelCase : Dict = WavaVecaCTCTokenizer(
                lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , )
            __lowerCAmelCase : List[str] = True if config.feat_extract_norm == '''layer''' else False
            __lowerCAmelCase : List[str] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , )
            __lowerCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ )
            processor.save_pretrained(lowercase__ )

        __lowerCAmelCase : str = WavaVecaForCTC(lowercase__ )
    else:
        __lowerCAmelCase : Any = WavaVecaForPreTraining(lowercase__ )

    if is_finetuned or is_seq_class:
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        __lowerCAmelCase : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' )
        __lowerCAmelCase : str = fairseq.tasks.setup_task(lowercase__ )

        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ )

    __lowerCAmelCase : int = model[0].eval()

    recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned )

    hf_wavavec.save_pretrained(lowercase__ )


if __name__ == "__main__":
    # CLI entry point; `is_finetuned` is derived from the two flags below.
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    _UpperCamelCase = parser.parse_args()

    _UpperCamelCase = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
275
1
# Mangled copy of transformers' SEW-D configuration (`SEWDConfig`).
# NOTE(review): the obfuscation renamed every keyword parameter to `A_`
# (duplicate parameter names — not valid Python) and every local to
# `__lowerCAmelCase`; the body still reads the original upstream parameter
# names (hidden_size, feat_extract_norm, ...). Restore those names before use.
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class __lowercase (_UpperCAmelCase ):
    # `model_type` identifier used by AutoConfig dispatch.
    _UpperCamelCase = """sew-d"""

    def __init__( self , A_=32 , A_=768 , A_=12 , A_=12 , A_=3072 , A_=2 , A_=512 , A_=256 , A_=True , A_=True , A_=("p2c", "c2p") , A_="layer_norm" , A_="gelu_python" , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.1 , A_=0.02 , A_=1e-7 , A_=1e-5 , A_="group" , A_="gelu" , A_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A_=False , A_=128 , A_=16 , A_=True , A_=0.05 , A_=10 , A_=2 , A_=0.0 , A_=10 , A_=0 , A_="mean" , A_=False , A_=False , A_=256 , A_=0 , A_=1 , A_=2 , **A_ , ) ->Optional[Any]:
        '''Build a SEW-D config: transformer sizes, conv feature-extractor
        geometry, DeBERTa-style relative attention, SpecAugment, CTC and
        sequence-classification head settings.'''
        super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
        # Transformer / feature-extractor hyperparameters.
        __lowerCAmelCase : Tuple = hidden_size
        __lowerCAmelCase : List[Any] = feat_extract_norm
        __lowerCAmelCase : List[Any] = feat_extract_activation
        __lowerCAmelCase : Union[str, Any] = list(A_ )   # conv_dim
        __lowerCAmelCase : List[str] = list(A_ )         # conv_stride
        __lowerCAmelCase : Tuple = list(A_ )             # conv_kernel
        __lowerCAmelCase : Optional[int] = conv_bias
        __lowerCAmelCase : Any = num_conv_pos_embeddings
        __lowerCAmelCase : str = num_conv_pos_embedding_groups
        __lowerCAmelCase : int = len(self.conv_dim )
        __lowerCAmelCase : Dict = num_hidden_layers
        __lowerCAmelCase : str = intermediate_size
        __lowerCAmelCase : Union[str, Any] = squeeze_factor
        # DeBERTa-style disentangled relative attention settings.
        __lowerCAmelCase : Any = max_position_embeddings
        __lowerCAmelCase : Dict = position_buckets
        __lowerCAmelCase : Tuple = share_att_key
        __lowerCAmelCase : Union[str, Any] = relative_attention
        __lowerCAmelCase : Any = norm_rel_ebd
        __lowerCAmelCase : Optional[Any] = list(A_ )     # pos_att_type
        __lowerCAmelCase : Dict = hidden_act
        __lowerCAmelCase : Dict = num_attention_heads
        # Dropout / regularization.
        __lowerCAmelCase : Tuple = hidden_dropout
        __lowerCAmelCase : Any = attention_dropout
        __lowerCAmelCase : str = activation_dropout
        __lowerCAmelCase : Any = feat_proj_dropout
        __lowerCAmelCase : List[Any] = final_dropout
        __lowerCAmelCase : Optional[int] = layer_norm_eps
        __lowerCAmelCase : List[str] = feature_layer_norm_eps
        __lowerCAmelCase : int = initializer_range
        __lowerCAmelCase : int = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __lowerCAmelCase : List[str] = apply_spec_augment
        __lowerCAmelCase : Optional[Any] = mask_time_prob
        __lowerCAmelCase : int = mask_time_length
        __lowerCAmelCase : Union[str, Any] = mask_time_min_masks
        __lowerCAmelCase : int = mask_feature_prob
        __lowerCAmelCase : Dict = mask_feature_length
        __lowerCAmelCase : Dict = mask_feature_min_masks

        # ctc loss
        __lowerCAmelCase : str = ctc_loss_reduction
        __lowerCAmelCase : Dict = ctc_zero_infinity

        # sequence classification
        __lowerCAmelCase : Optional[int] = use_weighted_layer_sum
        __lowerCAmelCase : Optional[Any] = classifier_proj_size

    @property
    def UpperCamelCase__ ( self ) ->str:
        '''Overall downsampling stride of the conv feature extractor (product of all conv strides).'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
275
# Mangled copy of transformers' TrajectoryTransformer configuration.
# NOTE(review): every keyword parameter was renamed to `A_` (duplicate
# parameter names — not valid Python); the body reads the original upstream
# names (vocab_size, action_weight, ...). Restore those names before use.
from ....configuration_utils import PretrainedConfig
from ....utils import logging


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class __lowercase (_UpperCAmelCase ):
    # `model_type`, cached-state keys ignored at inference, and the mapping
    # from the common HF attribute names onto this config's GPT-style names.
    _UpperCamelCase = """trajectory_transformer"""
    _UpperCamelCase = ["""past_key_values"""]
    _UpperCamelCase = {
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , A_=100 , A_=5 , A_=1 , A_=1 , A_=249 , A_=6 , A_=17 , A_=25 , A_=4 , A_=4 , A_=128 , A_=0.1 , A_=0.1 , A_=0.1 , A_=0.0_006 , A_=512 , A_=0.02 , A_=1e-12 , A_=1 , A_=True , A_=1 , A_=5_0256 , A_=5_0256 , **A_ , ) ->int:
        '''Build a TrajectoryTransformer config: loss weights for
        action/reward/value tokens, trajectory dimensions, and GPT-style
        transformer hyperparameters (n_layer/n_head/n_embd, dropouts).'''
        __lowerCAmelCase : Any = vocab_size
        # Per-component loss weights over the trajectory tokens.
        __lowerCAmelCase : Tuple = action_weight
        __lowerCAmelCase : Tuple = reward_weight
        __lowerCAmelCase : Union[str, Any] = value_weight
        __lowerCAmelCase : List[str] = max_position_embeddings
        # Trajectory layout: block size and (action, observation, transition) dims.
        __lowerCAmelCase : str = block_size
        __lowerCAmelCase : Optional[Any] = action_dim
        __lowerCAmelCase : Union[str, Any] = observation_dim
        __lowerCAmelCase : Union[str, Any] = transition_dim
        __lowerCAmelCase : Dict = learning_rate
        # GPT-style transformer hyperparameters.
        __lowerCAmelCase : Any = n_layer
        __lowerCAmelCase : Any = n_head
        __lowerCAmelCase : Optional[int] = n_embd
        __lowerCAmelCase : str = embd_pdrop
        __lowerCAmelCase : Dict = attn_pdrop
        __lowerCAmelCase : Optional[int] = resid_pdrop
        __lowerCAmelCase : Union[str, Any] = initializer_range
        __lowerCAmelCase : Optional[int] = layer_norm_eps
        __lowerCAmelCase : Any = kaiming_initializer_range
        __lowerCAmelCase : List[str] = use_cache
        super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
275
1
# Lazy-import `__init__` for the HerBERT tokenizers (mangled copy: the
# import-structure dict and the lazy module were both renamed to
# `_UpperCamelCase`, while `_LazyModule` is passed the original
# `_import_structure` name — restore those names before use).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


# Module -> public names, extended below only when optional deps are present.
_UpperCamelCase = {"tokenization_herbert": ["HerbertTokenizer"]}

# The fast tokenizer requires the `tokenizers` package; skip it silently otherwise.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    # At runtime, replace this module with a lazy loader.
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
def _lowercase ( lowercase__ , lowercase__ ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCAmelCase : int = str(bin(lowercase__ ) )[2:] # remove the leading "0b" __lowerCAmelCase : Any = str(bin(lowercase__ ) )[2:] __lowerCAmelCase : List[str] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
275
1
# Mangled copy of transformers' DeBERTa model tests. NOTE(review): the
# obfuscation renamed the tester/test classes to `__lowercase`, every test
# method to `UpperCamelCase__` (so later defs shadow earlier ones and pytest
# cannot discover them), parameters to duplicate `A_` (a syntax error), and
# locals to `__lowerCAmelCase` while bodies still read the original names
# (model, result, config_and_inputs, ...). Restore upstream names before use.
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class __lowercase (_UpperCAmelCase ):
    # Model tester: builds tiny DeBERTa configs/inputs and runs per-head checks.
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=False , A_=True , A_="None" , A_=3 , A_=4 , A_=None , ) ->List[Any]:
        '''Store the tiny-model hyperparameters used by all checks below.'''
        __lowerCAmelCase : Union[str, Any] = parent
        __lowerCAmelCase : List[str] = batch_size
        __lowerCAmelCase : Dict = seq_length
        __lowerCAmelCase : List[Any] = is_training
        __lowerCAmelCase : List[Any] = use_input_mask
        __lowerCAmelCase : Optional[int] = use_token_type_ids
        __lowerCAmelCase : Tuple = use_labels
        __lowerCAmelCase : str = vocab_size
        __lowerCAmelCase : int = hidden_size
        __lowerCAmelCase : Any = num_hidden_layers
        __lowerCAmelCase : Any = num_attention_heads
        __lowerCAmelCase : Dict = intermediate_size
        __lowerCAmelCase : int = hidden_act
        __lowerCAmelCase : int = hidden_dropout_prob
        __lowerCAmelCase : Any = attention_probs_dropout_prob
        __lowerCAmelCase : List[str] = max_position_embeddings
        __lowerCAmelCase : Union[str, Any] = type_vocab_size
        __lowerCAmelCase : Union[str, Any] = type_sequence_label_size
        __lowerCAmelCase : Optional[int] = initializer_range
        __lowerCAmelCase : int = num_labels
        __lowerCAmelCase : int = num_choices
        __lowerCAmelCase : List[str] = relative_attention
        __lowerCAmelCase : Union[str, Any] = position_biased_input
        __lowerCAmelCase : int = pos_att_type
        __lowerCAmelCase : List[Any] = scope

    def UpperCamelCase__ ( self ) ->Dict:
        '''Create random ids/mask/type-ids/labels plus a config (upstream `prepare_config_and_inputs`).'''
        __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __lowerCAmelCase : int = None
        if self.use_input_mask:
            __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        __lowerCAmelCase : List[str] = None
        if self.use_token_type_ids:
            __lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : int = None
        __lowerCAmelCase : List[str] = None
        if self.use_labels:
            __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )

        __lowerCAmelCase : Tuple = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Build a DebertaConfig from the stored hyperparameters (upstream `get_config`).'''
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def UpperCamelCase__ ( self ) ->str:
        '''Upstream `get_pipeline_config`: same config with vocab_size forced to 300.'''
        __lowerCAmelCase : str = self.get_config()
        __lowerCAmelCase : Dict = 300
        return config

    def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]:
        '''Upstream `check_loss_output`: loss must be a scalar (empty size).'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
        '''Upstream `create_and_check_deberta_model`: run the bare model with
        decreasing sets of inputs and check the hidden-state shape.'''
        __lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0]
        __lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0]
        __lowerCAmelCase : List[str] = model(A_ )[0]

        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
        '''Upstream masked-LM check: logits must be (batch, seq, vocab).'''
        __lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
        '''Upstream sequence-classification check: logits (batch, num_labels) and scalar loss.'''
        __lowerCAmelCase : Any = self.num_labels
        __lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict:
        '''Upstream token-classification check: logits (batch, seq, num_labels).'''
        __lowerCAmelCase : List[Any] = self.num_labels
        __lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str:
        '''Upstream question-answering check: start/end logits shaped (batch, seq).'''
        __lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase : int = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''Upstream `prepare_config_and_inputs_for_common`: repack inputs as the
        dict shape shared by the common model tests.'''
        __lowerCAmelCase : Any = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ), (
                __lowerCAmelCase
            ),
        ) : Tuple = config_and_inputs
        __lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    # Model/pipeline class registries for the shared mixin tests.
    _UpperCamelCase = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCamelCase = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Mixin feature flags (fx compatible; no pruning/head-masking/etc. tests).
    _UpperCamelCase = True
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False
    _UpperCamelCase = False

    def UpperCamelCase__ ( self ) ->int:
        '''setUp: create the model tester and a ConfigTester.'''
        __lowerCAmelCase : int = DebertaModelTester(self )
        __lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Run the generic config round-trip tests.'''
        self.config_tester.run_common_tests()

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Bare-model forward/shape test.'''
        __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*A_ )

    def UpperCamelCase__ ( self ) ->int:
        '''Sequence-classification head test.'''
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ )

    def UpperCamelCase__ ( self ) ->int:
        '''Masked-LM head test.'''
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Question-answering head test.'''
        __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*A_ )

    def UpperCamelCase__ ( self ) ->List[Any]:
        '''Token-classification head test.'''
        __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*A_ )

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Smoke-test loading the first published checkpoint from the hub.'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )


@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
    @unittest.skip(reason='''Model not available yet''' )
    def UpperCamelCase__ ( self ) ->Dict:
        '''Placeholder for a masked-LM integration test.'''
        pass

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Integration test: run microsoft/deberta-base on fixed ids and compare
        a 3x3 slice of the hidden states against recorded values.'''
        __lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' )

        __lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        __lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0]
        # compare the actual values for a slice.
        __lowerCAmelCase : Optional[Any] = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
275
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): def constraint_to_multiple_of(lowercase__ , lowercase__ , lowercase__=0 , lowercase__=None ): __lowerCAmelCase : int = round(val / multiple ) * multiple if max_val is not None and x > max_val: __lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: __lowerCAmelCase : Any = math.ceil(val / multiple ) * multiple return x __lowerCAmelCase : Dict = (output_size, output_size) if isinstance(lowercase__ , lowercase__ ) else output_size __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = get_image_size(lowercase__ ) __lowerCAmelCase, __lowerCAmelCase : int = output_size # determine new height and width __lowerCAmelCase : Optional[Any] = output_height / input_height __lowerCAmelCase : List[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __lowerCAmelCase : str = scale_width else: # fit height __lowerCAmelCase : str = scale_height __lowerCAmelCase : Any = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase__ ) __lowerCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase__ ) return (new_height, new_width) class __lowercase 
(_UpperCAmelCase ): _UpperCamelCase = ["""pixel_values"""] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = False , A_ = 1 , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) ->None: '''simple docstring''' super().__init__(**A_ ) __lowerCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384} __lowerCAmelCase : Dict = get_size_dict(A_ ) __lowerCAmelCase : Optional[Any] = do_resize __lowerCAmelCase : int = size __lowerCAmelCase : Dict = keep_aspect_ratio __lowerCAmelCase : List[Any] = ensure_multiple_of __lowerCAmelCase : Tuple = resample __lowerCAmelCase : Dict = do_rescale __lowerCAmelCase : Any = rescale_factor __lowerCAmelCase : List[Any] = do_normalize __lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , A_ , A_ , A_ = False , A_ = 1 , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) ->np.ndarray: '''simple docstring''' __lowerCAmelCase : int = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) __lowerCAmelCase : Union[str, Any] = get_resize_output_image_size( A_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A_ , multiple=A_ , ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ = None , **A_ , ) ->Dict: '''simple docstring''' return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ = None , **A_ , ) ->np.ndarray: '''simple docstring''' return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) ->PIL.Image.Image: '''simple docstring''' __lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase : Optional[int] = size if size is not None else self.size __lowerCAmelCase : Union[str, Any] = get_size_dict(A_ ) __lowerCAmelCase : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __lowerCAmelCase : Optional[int] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __lowerCAmelCase : Tuple = resample if resample is not None else self.resample __lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale __lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean __lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std __lowerCAmelCase : Optional[Any] = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase : Any = [to_numpy_array(A_ ) for image in images] if do_resize: __lowerCAmelCase : Optional[Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_rescale: __lowerCAmelCase : Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: __lowerCAmelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] __lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(A_ , A_ ) for image in images] __lowerCAmelCase : Dict = {'''pixel_values''': images} return BatchFeature(data=A_ , tensor_type=A_ ) def UpperCamelCase__ ( self , A_ , A_ = None ) ->Any: '''simple docstring''' __lowerCAmelCase : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(A_ ): __lowerCAmelCase : Optional[int] = target_sizes.numpy() __lowerCAmelCase : List[str] = [] for idx in range(len(A_ ) ): __lowerCAmelCase : Any = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ ) __lowerCAmelCase : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: __lowerCAmelCase : Any = logits.argmax(dim=1 ) __lowerCAmelCase : List[Any] = [semantic_segmentation[i] for i in 
range(semantic_segmentation.shape[0] )] return semantic_segmentation
275
1
import math
import sys

import cv2  # was mangled to "cva" by the obfuscation pass; OpenCV is the real dependency
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the Gaussian function element-wise to ``img``.

    ``variance`` is sigma**2; the Gaussian is evaluated with zero mean.
    """
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of ``kernel_size`` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a spatial Gaussian kernel of the given dimension.

    Each cell first holds its Euclidean distance from the kernel centre;
    the Gaussian is then applied to the whole distance matrix at once.
    """
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving smoothing: weight each neighbour by spatial distance
    AND intensity difference, then take the weighted average.

    Border pixels (within kernel_size // 2 of the edge) are left at zero.
    """
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)  # hoisted: input-independent
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity differences relative to the window's centre pixel
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2


def parse_args(args: list) -> tuple:
    """Parse CLI args: [filename [spatial_var [intensity_var [kernel_size]]]].

    An even kernel_size is bumped to the next odd number so the kernel has
    a true centre pixel.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)  # was the nonexistent "np.uinta" in the mangled source
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
275
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __lowercase(unittest.TestCase):
    """Unit tests for the Vector / Matrix linear-algebra library.

    Every method previously shared one obfuscated name, so only the last
    definition survived and unittest discovered none of them (discovery
    requires the ``test`` prefix).  Each case now has a unique ``test_*`` name.
    """

    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])  # [2,-1,4] . [1,-2,-1] == 0 (orthogonal pair)
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        # axpy(a, x, y) == a * x + y
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
275
1
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _UpperCamelCase = random.Random() def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): if rng is None: __lowerCAmelCase : Any = global_rng __lowerCAmelCase : str = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __lowercase (unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Dict = batch_size __lowerCAmelCase : str = min_seq_length __lowerCAmelCase : int = max_seq_length __lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Any = padding_value __lowerCAmelCase : str = sampling_rate __lowerCAmelCase : Optional[Any] = return_attention_mask __lowerCAmelCase : Optional[Any] = do_normalize __lowerCAmelCase : Optional[Any] = feature_size __lowerCAmelCase : Optional[int] = chunk_length __lowerCAmelCase : Optional[Any] = hop_length def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, 
"do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]: '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: __lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase : Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowercase (_UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0] check_json_file_has_correct_format(A_ ) __lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ ) __lowerCAmelCase : Dict = feat_extract_first.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters __lowerCAmelCase : Dict = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A_ ) __lowerCAmelCase : List[str] = 
self.feature_extraction_class.from_json_file(A_ ) __lowerCAmelCase : List[str] = feat_extract_first.to_dict() __lowerCAmelCase : Tuple = feat_extract_second.to_dict() __lowerCAmelCase : Any = feat_extract_first.mel_filters __lowerCAmelCase : List[str] = feat_extract_second.mel_filters self.assertTrue(np.allclose(A_ , A_ ) ) self.assertEqual(A_ , A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched __lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)] __lowerCAmelCase : Optional[int] = np.asarray(A_ ) __lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] __lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] __lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated] __lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features __lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' import torch __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa ) __lowerCAmelCase : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , 
split='''validation''' ) # automatic decoding with librispeech __lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = torch.tensor( [ 0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951, 0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678, 0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554, -0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854 ] ) # fmt: on __lowerCAmelCase : int = self._load_datasamples(1 ) __lowerCAmelCase : Any = WhisperFeatureExtractor() __lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 3000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' __lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : str = self._load_datasamples(1 )[0] __lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue __lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0] self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
275
def _lowercase ( lowercase__ , lowercase__ ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
275
1
_UpperCamelCase = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } _UpperCamelCase = {value: key for key, value in encode_dict.items()} def _lowercase ( lowercase__ ): __lowerCAmelCase : int = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _lowercase ( lowercase__ ): if set(lowercase__ ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) __lowerCAmelCase : Optional[Any] = '''''' for word in coded.split(): while len(lowercase__ ) != 0: decoded += decode_dict[word[:5]] __lowerCAmelCase : List[str] = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
275
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (avoids sqrt)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return the points sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) minimum squared distance; used for small bases."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the vertical strip.

    Only up to 6 neighbours need checking per point (classic packing
    argument for the closest-pair strip).
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the SQUARED distance."""
    # base case: brute force for three or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion on the two halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the distance between the two closest of ``points_counts`` points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
275
1
from collections import deque from .hash_table import HashTable class __lowercase (_UpperCAmelCase ): def __init__( self , *A_ , **A_ ) ->int: '''simple docstring''' super().__init__(*A_ , **A_ ) def UpperCamelCase__ ( self , A_ , A_ ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Dict = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(A_ ) __lowerCAmelCase : int = self.values[key] def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' return ( sum(self.charge_factor - len(A_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def UpperCamelCase__ ( self , A_ , A_=None ) ->str: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(A_ ) == 0 ): return key return super()._collision_resolution(A_ , A_ )
275
def _lowercase ( lowercase__ = 2_0_0 ): __lowerCAmelCase : Union[str, Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0] __lowerCAmelCase : Dict = [0] * (pence + 1) __lowerCAmelCase : Optional[int] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowercase__ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 7_3682
275
1
import os from datetime import datetime as dt from github import Github _UpperCamelCase = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def _lowercase ( ): __lowerCAmelCase : Dict = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCAmelCase : Tuple = g.get_repo('''huggingface/diffusers''' ) __lowerCAmelCase : Dict = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCAmelCase : Dict = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ ) __lowerCAmelCase : Optional[int] = comments[0] if len(lowercase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 2_3 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
275
# Tests for ConsistencyModelPipeline: fast CPU tests (tiny checkpoints) plus
# slow GPU tests (full imagenet64 checkpoint, incl. fp16 + flash attention).
#
# NOTE(review): this file was mechanically anonymized — every assignment target
# became `__lowerCAmelCase`, parameters became `A_` (often duplicated in one
# signature, which is a SyntaxError), and all test methods share the name
# `UpperCamelCase__`. Names such as `unet`, `pipe`, `inputs`, `image` are read
# without a visible binding. Code is kept byte-identical; only comments and
# docstrings were added/updated.
import gc
import unittest

import numpy as np
import torch
from torch.backends.cuda import sdp_kernel

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


class __lowercase (_UpperCAmelCase , unittest.TestCase ):
    # Pipeline under test plus the parameter sets exercised by the tester mixin.
    _UpperCamelCase = ConsistencyModelPipeline
    _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    _UpperCamelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    _UpperCamelCase = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    @property
    def UpperCamelCase__ ( self ) ->List[str]:
        '''Tiny unconditional UNet checkpoint used by the fast tests.'''
        __lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
        return unet

    @property
    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Tiny class-conditional UNet checkpoint used by the fast tests.'''
        __lowerCAmelCase : List[str] = UNetaDModel.from_pretrained(
            '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
        return unet

    def UpperCamelCase__ ( self , A_=False ) ->Dict:
        '''Build the pipeline components dict (unet + multistep scheduler).'''
        if class_cond:
            __lowerCAmelCase : List[str] = self.dummy_cond_unet
        else:
            __lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet

        # Default to CM multistep sampler
        __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )

        __lowerCAmelCase : Dict = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components

    def UpperCamelCase__ ( self , A_ , A_=0 ) ->Tuple:
        '''Deterministic call kwargs for the pipeline (seeded generator).'''
        # MPS does not support device-bound generators; fall back to the global RNG.
        if str(A_ ).startswith('''mps''' ):
            __lowerCAmelCase : str = torch.manual_seed(A_ )
        else:
            __lowerCAmelCase : Dict = torch.Generator(device=A_ ).manual_seed(A_ )

        __lowerCAmelCase : Tuple = {
            '''batch_size''': 1,
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''generator''': generator,
            '''output_type''': '''np''',
        }

        return inputs

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Multistep sampling on CPU against a golden 3x3 output slice.'''
        __lowerCAmelCase : Union[str, Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Tuple = self.get_dummy_components()
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : List[str] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)

        __lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase : str = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Multistep sampling with the class-conditional UNet (class label 0).'''
        __lowerCAmelCase : List[Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : str = self.get_dummy_components(class_cond=A_ )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : List[Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : Optional[int] = 0
        __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)

        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[str] = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Single-step sampling (num_inference_steps=1, timesteps=None) on CPU.'''
        __lowerCAmelCase : Any = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
        __lowerCAmelCase : List[Any] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : int = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Tuple = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : Any = 1
        __lowerCAmelCase : List[Any] = None
        __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)

        __lowerCAmelCase : Any = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[Any] = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Single-step sampling with the class-conditional UNet.'''
        __lowerCAmelCase : List[Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase : Optional[Any] = self.get_dummy_components(class_cond=A_ )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(**A_ )
        __lowerCAmelCase : Union[str, Any] = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Any = self.get_dummy_inputs(A_ )
        __lowerCAmelCase : List[str] = 1
        __lowerCAmelCase : Dict = None
        __lowerCAmelCase : Tuple = 0
        __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 32, 32, 3)

        __lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Any = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3


@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def UpperCamelCase__ ( self ) ->int:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->str:
        '''Call kwargs for the full-size pipeline, optionally with fixed latents.'''
        __lowerCAmelCase : Dict = torch.manual_seed(A_ )

        __lowerCAmelCase : Tuple = {
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }

        if get_fixed_latents:
            __lowerCAmelCase : List[str] = self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ )
            __lowerCAmelCase : Union[str, Any] = latents

        return inputs

    def UpperCamelCase__ ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) ->Optional[int]:
        '''Seeded random latents on the requested device/dtype.'''
        if type(A_ ) == str:
            __lowerCAmelCase : int = torch.device(A_ )
        __lowerCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        __lowerCAmelCase : Union[str, Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
        return latents

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Full imagenet64 checkpoint, multistep sampling, golden slice check.'''
        __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : str = self.get_inputs()
        __lowerCAmelCase : Any = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)

        __lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[int] = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def UpperCamelCase__ ( self ) ->Any:
        '''Full imagenet64 checkpoint, single-step sampling.'''
        __lowerCAmelCase : int = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : List[str] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : List[Any] = self.get_inputs()
        __lowerCAmelCase : Tuple = 1
        __lowerCAmelCase : Optional[Any] = None
        __lowerCAmelCase : str = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)

        __lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCAmelCase : List[Any] = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''fp16 multistep sampling with torch 2.0 flash attention on fixed latents.'''
        __lowerCAmelCase : Tuple = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Any = self.get_inputs(get_fixed_latents=A_ , device=A_ )

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
            __lowerCAmelCase : Dict = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)

        __lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Optional[int] = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def UpperCamelCase__ ( self ) ->List[str]:
        '''fp16 single-step sampling with torch 2.0 flash attention on fixed latents.'''
        __lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        __lowerCAmelCase : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        __lowerCAmelCase : Union[str, Any] = ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
        pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A_ )

        __lowerCAmelCase : Union[str, Any] = self.get_inputs(get_fixed_latents=A_ , device=A_ )
        __lowerCAmelCase : Any = 1
        __lowerCAmelCase : int = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
            __lowerCAmelCase : int = pipe(**A_ ).images
        assert image.shape == (1, 64, 64, 3)

        __lowerCAmelCase : str = image[0, -3:, -3:, -1]
        __lowerCAmelCase : Any = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
275
1
import socket


def main():
    """Connect to a local file server on port 12312 and save the stream.

    Receives data in 1024-byte chunks until the peer closes the connection
    and writes everything to ``Received_file`` in the working directory.
    """
    # Bug fix: the original defined `_lowercase()` but the __main__ guard
    # called `main()`; locals were mangled to `__lowerCAmelCase` while being
    # read as `sock`/`host`/`port`/`data`, and the write call passed the
    # undefined name `lowercase__` instead of `data`.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
275
from collections import deque

from .hash_table import HashTable


class __lowercase (_UpperCAmelCase ):
    """Hash table whose slots chain colliding entries in a deque.

    Bug fixes vs. the anonymized original: method signatures repeated the
    parameter name ``A_`` (a SyntaxError), all three methods shared one name
    (so they shadowed each other), and bodies read names (``key``, ``slot``)
    that were never bound. Restored to the parent ``HashTable`` protocol
    (``_set_value`` / ``balanced_factor`` / ``_collision_resolution``).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the deque chained at *key* (creating it if empty)."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # Mirror the slot into the parent's key bookkeeping.
        # NOTE(review): the anonymized original lost this assignment's target;
        # `_keys` matches the upstream implementation — confirm against hash_table.py.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep chaining at *key* until its deque is full and no slot is empty."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
275
1
# Tests for MgpstrProcessor (character tokenizer + ViT image processor).
#
# NOTE(review): this file was mechanically anonymized — assignment targets
# became `__lowerCAmelCase`, parameters `A_`, and every test method is named
# `UpperCamelCase__`. Names such as `processor`, `tokenizer`, `inputs` are
# read without a visible binding. Code kept byte-identical; only comments and
# docstrings were added/updated.
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
    _UpperCamelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def UpperCamelCase__ ( self ) ->str:
        '''Image-processor config dict provided by the shared tester.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self ) ->str:
        '''setUp: write a throwaway char vocab and image-processor config to a temp dir.'''
        __lowerCAmelCase : Tuple = (3, 32, 128)
        __lowerCAmelCase : List[str] = tempfile.mkdtemp()

        # fmt: off
        __lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        __lowerCAmelCase : Optional[int] = dict(zip(A_ , range(len(A_ ) ) ) )
        __lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A_ ) + '''\n''' )

        __lowerCAmelCase : Union[str, Any] = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , A_ )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(A_ , A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        '''Tokenizer loaded from the temp dir.'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self , **A_ ) ->Tuple:
        '''Image processor loaded from the temp dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''tearDown: remove the temp dir.'''
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Random uint8 image wrapped in a PIL Image.'''
        __lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )

        __lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) )

        return image_input

    def UpperCamelCase__ ( self ) ->Any:
        '''save_pretrained -> from_pretrained round-trip keeps tokenizer and image processor.'''
        __lowerCAmelCase : Dict = self.get_tokenizer()
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''from_pretrained with overriding kwargs picks up new tokens / config values.'''
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : Union[str, Any] = self.get_image_processor()
        __lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
        processor.save_pretrained(self.tmpdirname )

        __lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )

        __lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )

        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , A_ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''processor(images=...) must match calling the image processor directly.'''
        __lowerCAmelCase : Any = self.get_image_processor()
        __lowerCAmelCase : Optional[Any] = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Optional[int] = self.prepare_image_inputs()

        __lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
        __lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase__ ( self ) ->str:
        '''processor(text=...) must match the tokenizer output.'''
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Any = '''test'''

        __lowerCAmelCase : Dict = processor(text=A_ )

        __lowerCAmelCase : str = tokenizer(A_ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase__ ( self ) ->Optional[int]:
        '''Joint text+image call produces pixel_values and labels; empty call raises.'''
        __lowerCAmelCase : Dict = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[Any] = '''test'''
        __lowerCAmelCase : int = self.prepare_image_inputs()

        __lowerCAmelCase : int = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )

        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''char_decode should match tokenizer.batch_decode with spaces stripped.'''
        __lowerCAmelCase : List[Any] = self.get_image_processor()
        __lowerCAmelCase : int = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        __lowerCAmelCase : Optional[int] = processor.char_decode(A_ )
        __lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ )
        __lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]

        self.assertListEqual(A_ , A_ )

    def UpperCamelCase__ ( self ) ->Any:
        '''With text=None the processor output keys equal model_input_names.'''
        __lowerCAmelCase : str = self.get_image_processor()
        __lowerCAmelCase : Any = self.get_tokenizer()
        __lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : Union[str, Any] = None
        __lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()

        __lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''batch_decode over (char, bpe, wp) logits yields the documented result keys.'''
        __lowerCAmelCase : List[str] = self.get_image_processor()
        __lowerCAmelCase : List[str] = self.get_tokenizer()
        __lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )

        __lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
        __lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 )
        __lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 )

        __lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )

        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
275
# Tests for WavaVecaFeatureExtractor: padding, normalization, dtype handling.
#
# NOTE(review): this file was mechanically anonymized — assignment targets
# became `__lowerCAmelCase`, parameters `A_`/`lowercase__` (duplicated within
# single signatures, which is a SyntaxError), and test methods all share the
# name `UpperCamelCase__`. Names such as `rng`, `shape`, `speech_inputs` are
# read without a visible binding. Code kept byte-identical; only comments and
# docstrings were added/updated.
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

# Shared module-level RNG used when no explicit rng is passed.
_UpperCamelCase = random.Random()


def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
    # Creates a random float32-compatible nested list of the given 2-D shape.
    if rng is None:
        __lowerCAmelCase : Optional[Any] = global_rng

    __lowerCAmelCase : Tuple = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class __lowercase (unittest.TestCase ):
    def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ) ->List[Any]:
        '''Configuration holder for the feature-extractor tests.'''
        __lowerCAmelCase : List[Any] = parent
        __lowerCAmelCase : Optional[int] = batch_size
        __lowerCAmelCase : Any = min_seq_length
        __lowerCAmelCase : Tuple = max_seq_length
        # Step between consecutive input lengths so sizes strictly increase.
        __lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowerCAmelCase : Dict = feature_size
        __lowerCAmelCase : Optional[int] = padding_value
        __lowerCAmelCase : Tuple = sampling_rate
        __lowerCAmelCase : Union[str, Any] = return_attention_mask
        __lowerCAmelCase : Dict = do_normalize

    def UpperCamelCase__ ( self ) ->Optional[Any]:
        '''Kwargs dict used to construct the feature extractor under test.'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase__ ( self , A_=False , A_=False ) ->Union[str, Any]:
        '''Build a batch of speech inputs, equal-length or strictly increasing.'''
        def _flatten(A_ ):
            return list(itertools.chain(*A_ ) )

        if equal_length:
            __lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __lowerCAmelCase : Tuple = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            __lowerCAmelCase : Tuple = [np.asarray(A_ ) for x in speech_inputs]

        return speech_inputs


class __lowercase (_UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase = WavaVecaFeatureExtractor

    def UpperCamelCase__ ( self ) ->Dict:
        '''setUp: create the shared tester.'''
        __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self )

    def UpperCamelCase__ ( self , A_ ) ->Optional[Any]:
        '''Assert per-feature zero mean / unit variance within tolerance.'''
        self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) )

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Python lists, numpy arrays and 2-D arrays all encode identically.'''
        __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs]

        # Test not batched input
        __lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test batched
        __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __lowerCAmelCase : List[Any] = np.asarray(A_ )
        __lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''Normalization holds under each padding strategy (np tensors).'''
        __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]

        __lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowerCAmelCase : str = [None, 1600, None]
        for max_length, padding in zip(A_ , A_ ):
            __lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' )
            __lowerCAmelCase : Optional[Any] = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def UpperCamelCase__ ( self ) ->Dict:
        '''Normalization holds under each padding strategy (python outputs).'''
        __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 )
        __lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]

        __lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowerCAmelCase : List[str] = [None, 1600, None]

        for max_length, padding in zip(A_ , A_ ):
            __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ )
            __lowerCAmelCase : Union[str, Any] = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def UpperCamelCase__ ( self ) ->Tuple:
        '''Truncation to max_length with padding='max_length' keeps normalization.'''
        __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : List[str] = feat_extract(
            A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
        __lowerCAmelCase : int = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def UpperCamelCase__ ( self ) ->List[str]:
        '''padding='longest' pads to min(max_length, longest input).'''
        __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : int = feat_extract(
            A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
        __lowerCAmelCase : Optional[Any] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )

        __lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : Optional[int] = feat_extract(
            A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
        __lowerCAmelCase : List[str] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    @require_torch
    def UpperCamelCase__ ( self ) ->Any:
        '''pad() returns float32 for both np and pt tensor types.'''
        import torch

        __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa )
        __lowerCAmelCase : List[Any] = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def UpperCamelCase__ ( self ) ->int:
        '''Pretrained checkpoints: attention mask only for layer-norm extractors.'''
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            __lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ )
            __lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ )

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
275
1
# GPTBigCode (SantaCoder) model configuration.
#
# NOTE(review): this file was mechanically anonymized — class attributes are
# all bound to `_UpperCamelCase` (only the last binding survives), every
# __init__ parameter was renamed to `A_` (duplicated, a SyntaxError), and the
# body reads the intended GPT-2-style names (`vocab_size`, `n_embd`, ...)
# that are never bound. Code kept byte-identical; only comments/docstrings added.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
_UpperCamelCase = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class __lowercase (_UpperCAmelCase ):
    # model_type identifier, keys to ignore at inference, and the mapping from
    # the generic config attribute names to the GPT-2-style field names.
    _UpperCamelCase = """gpt_bigcode"""
    _UpperCamelCase = ["""past_key_values"""]
    _UpperCamelCase = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , A_=5_0257 , A_=1024 , A_=768 , A_=12 , A_=12 , A_=None , A_="gelu_pytorch_tanh" , A_=0.1 , A_=0.1 , A_=0.1 , A_=1e-5 , A_=0.02 , A_=True , A_=True , A_=5_0256 , A_=5_0256 , A_=True , A_=True , A_=True , **A_ , ) ->Optional[int]:
        '''Store GPT-2-style hyperparameters (defaults match gpt_bigcode-santacoder).

        Defaults, in positional order: vocab_size=50257, n_positions=1024,
        n_embd=768, n_layer=12, n_head=12, n_inner=None,
        activation_function="gelu_pytorch_tanh", resid/embd/attn dropout=0.1,
        layer_norm_epsilon=1e-5, initializer_range=0.02, then attention/cache
        flags and bos/eos token ids (both 50256).
        '''
        __lowerCAmelCase : Dict = vocab_size
        __lowerCAmelCase : Dict = n_positions
        __lowerCAmelCase : Dict = n_embd
        __lowerCAmelCase : Dict = n_layer
        __lowerCAmelCase : Any = n_head
        __lowerCAmelCase : int = n_inner
        __lowerCAmelCase : Optional[Any] = activation_function
        __lowerCAmelCase : str = resid_pdrop
        __lowerCAmelCase : Union[str, Any] = embd_pdrop
        __lowerCAmelCase : Dict = attn_pdrop
        __lowerCAmelCase : Optional[int] = layer_norm_epsilon
        __lowerCAmelCase : Optional[int] = initializer_range
        __lowerCAmelCase : Optional[int] = scale_attn_weights
        __lowerCAmelCase : Optional[int] = use_cache
        __lowerCAmelCase : Any = attention_softmax_in_fpaa
        __lowerCAmelCase : Any = scale_attention_softmax_in_fpaa
        __lowerCAmelCase : Dict = multi_query
        __lowerCAmelCase : Tuple = bos_token_id
        __lowerCAmelCase : int = eos_token_id

        super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
275
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase (_UpperCAmelCase ): def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=False , A_=True , A_="None" , A_=3 , A_=4 , A_=None , ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : List[str] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : List[Any] = is_training __lowerCAmelCase : List[Any] = use_input_mask __lowerCAmelCase : Optional[int] = use_token_type_ids __lowerCAmelCase : Tuple = use_labels __lowerCAmelCase : str = vocab_size __lowerCAmelCase : int = hidden_size __lowerCAmelCase : Any = num_hidden_layers __lowerCAmelCase : Any = num_attention_heads __lowerCAmelCase : Dict = intermediate_size __lowerCAmelCase : int = hidden_act __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : List[str] = max_position_embeddings __lowerCAmelCase : Union[str, Any] = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : Optional[int] = initializer_range __lowerCAmelCase : int = num_labels __lowerCAmelCase : int = num_choices __lowerCAmelCase : List[str] = relative_attention __lowerCAmelCase : Union[str, Any] = 
position_biased_input __lowerCAmelCase : int = pos_att_type __lowerCAmelCase : List[Any] = scope def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : int = None if self.use_input_mask: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __lowerCAmelCase : List[str] = None if self.use_token_type_ids: __lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : int = None __lowerCAmelCase : List[str] = None if self.use_labels: __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : str = self.get_config() __lowerCAmelCase : Dict = 300 return config def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]: '''simple 
docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any: '''simple docstring''' __lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0] __lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0] __lowerCAmelCase : List[str] = model(A_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int: '''simple docstring''' __lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any: '''simple docstring''' __lowerCAmelCase : Any = self.num_labels __lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(A_ ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : List[Any] = self.num_labels __lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str: '''simple docstring''' __lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ ) model.to(A_ 
) model.eval() __lowerCAmelCase : int = model( A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Any = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : Tuple = config_and_inputs __lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) _UpperCamelCase = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase = True _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = DebertaModelTester(self ) __lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*A_ ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*A_ ) def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*A_ ) @slow def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase (unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' pass @slow def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0] # compare the actual values for a slice. 
__lowerCAmelCase : Optional[Any] = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
275
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _lowercase ( lowercase__ ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowercase (nn.Module ): def __init__( self , A_ , A_ ) ->Dict: '''simple docstring''' super().__init__() __lowerCAmelCase : str = module __lowerCAmelCase : int = nn.Sequential( nn.Linear(module.in_features , A_ , bias=A_ ) , nn.Linear(A_ , module.out_features , bias=A_ ) , ) __lowerCAmelCase : Optional[int] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=A_ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCamelCase__ ( self , A_ , *A_ , **A_ ) ->List[str]: '''simple docstring''' return self.module(A_ , *A_ , **A_ ) + self.adapter(A_ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowercase (unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module _UpperCamelCase = """bigscience/bloom-1b7""" # Constant values _UpperCamelCase = 2.109659552692574 _UpperCamelCase = """Hello my name is""" _UpperCamelCase = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) _UpperCamelCase = 10 def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(self.model_name ) class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' super().setUp() # Models and tokenizer __lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Any = self.model_abit.config self.assertTrue(hasattr(A_ , '''quantization_config''' ) ) __lowerCAmelCase : Union[str, Any] = config.to_dict() __lowerCAmelCase : Any = config.to_diff_dict() __lowerCAmelCase : Dict = config.to_json_string() def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit __lowerCAmelCase : Optional[int] = self.model_fpaa.get_memory_footprint() __lowerCAmelCase : Union[str, Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowerCAmelCase : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(A_ , torch.nn.Linear ): if name not in ["lm_head"] + 
TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' __lowerCAmelCase : Tuple = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCAmelCase : int = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A_ ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' __lowerCAmelCase : Optional[Any] = BitsAndBytesConfig() __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=A_ , device_map='''auto''' ) __lowerCAmelCase : Tuple = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCAmelCase : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A_ ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' with self.assertRaises(A_ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(A_ ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = BitsAndBytesConfig() with self.assertRaises(A_ ): __lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=A_ , load_in_abit=A_ , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' with self.assertRaises(A_ ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(A_ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(A_ ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with 
self.assertRaises(A_ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(A_ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowerCAmelCase : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ) __lowerCAmelCase : Optional[Any] = self.model_fpaa.to(torch.floataa ) __lowerCAmelCase : Any = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowerCAmelCase : Optional[Any] = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error __lowerCAmelCase : List[str] = self.model_fpaa.half() # Check this does not throw an error __lowerCAmelCase : Tuple = self.model_fpaa.float() def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A_ , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowercase (unittest.TestCase ): @classmethod def UpperCamelCase__ ( cls ) ->Any: '''simple docstring''' __lowerCAmelCase : str = '''t5-small''' __lowerCAmelCase : Optional[int] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(cls.model_name ) __lowerCAmelCase : List[Any] = '''Translate in German: Hello, my dog is cute''' def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' from transformers import TaForConditionalGeneration __lowerCAmelCase : List[str] = TaForConditionalGeneration._keep_in_fpaa_modules __lowerCAmelCase : Tuple = None # test with `t5-small` __lowerCAmelCase : Dict = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A_ , 
device_map='''auto''' ) __lowerCAmelCase : Tuple = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCAmelCase : str = model.generate(**A_ ) # test with `flan-t5-small` __lowerCAmelCase : Optional[int] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=A_ , device_map='''auto''' ) __lowerCAmelCase : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCAmelCase : Tuple = model.generate(**A_ ) __lowerCAmelCase : List[str] = modules def UpperCamelCase__ ( self ) ->List[Any]: '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowerCAmelCase : Optional[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowerCAmelCase : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCAmelCase : List[str] = model.generate(**A_ ) # test with `flan-t5-small` __lowerCAmelCase : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=A_ , device_map='''auto''' ) __lowerCAmelCase : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) __lowerCAmelCase : Tuple = model.generate(**A_ ) class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->Union[str, Any]: '''simple docstring''' super().setUp() # model_name __lowerCAmelCase : str = '''bigscience/bloom-560m''' __lowerCAmelCase : Any = '''t5-small''' # Different types of model __lowerCAmelCase : int = AutoModel.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' ) # Sequence classification model __lowerCAmelCase : int = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=A_ , device_map='''auto''' ) # 
CausalLM model __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A_ , device_map='''auto''' ) # Seq2seq model __lowerCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=A_ , device_map='''auto''' ) def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' super().setUp() def UpperCamelCase__ ( self ) ->Optional[int]: '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowerCAmelCase : Any = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' super().setUp() def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=A_ , 
device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowerCAmelCase : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch __lowerCAmelCase : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A_ ) , self.EXPECTED_OUTPUTS ) class __lowercase (_UpperCAmelCase ): def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = '''facebook/opt-350m''' super().setUp() def UpperCamelCase__ ( self ) ->Optional[Any]: '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters __lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A_ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowerCAmelCase : List[Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __lowerCAmelCase : Dict = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(A_ ) ): __lowerCAmelCase : List[Any] = LoRALayer(module.q_proj , rank=16 ) __lowerCAmelCase : List[Any] = LoRALayer(module.k_proj , rank=16 ) __lowerCAmelCase : int = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowerCAmelCase : str = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowerCAmelCase : Optional[Any] = model.forward(**A_ ) out.logits.norm().backward() for module in model.modules(): if isinstance(A_ , A_ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(A_ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowercase (_UpperCAmelCase ): _UpperCamelCase = """gpt2-xl""" _UpperCamelCase = 3.3191854854152187
275
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def _lowercase ( lowercase__ ): __lowerCAmelCase : str = [] __lowerCAmelCase : List[Any] = [] __lowerCAmelCase : str = [] for rt in rc.restypes: __lowerCAmelCase : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) __lowerCAmelCase : List[str] = {name: i for i, name in enumerate(lowercase__ )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) __lowerCAmelCase : List[Any] = torch.tensor( lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , ) __lowerCAmelCase : Optional[Any] = torch.tensor( lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , ) __lowerCAmelCase : Tuple = torch.tensor( lowercase__ , dtype=torch.floataa , device=protein['''aatype'''].device , ) __lowerCAmelCase : List[Any] = protein['''aatype'''].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein __lowerCAmelCase : Any = restype_atomaa_to_atomaa[protein_aatype] __lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype] __lowerCAmelCase : int = residx_atomaa_mask __lowerCAmelCase : List[str] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back __lowerCAmelCase : int = restype_atomaa_to_atomaa[protein_aatype] __lowerCAmelCase : Union[str, Any] = residx_atomaa_to_atomaa.long() # create the corresponding mask __lowerCAmelCase : str = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['''aatype'''].device ) for restype, restype_letter in enumerate(rc.restypes ): __lowerCAmelCase : Optional[int] = rc.restype_atoa[restype_letter] __lowerCAmelCase : Optional[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: __lowerCAmelCase : str = rc.atom_order[atom_name] __lowerCAmelCase : List[Any] = 1 __lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype] __lowerCAmelCase : Any = residx_atomaa_mask return protein def _lowercase ( lowercase__ ): __lowerCAmelCase : Dict = tree_map(lambda lowercase__ : torch.tensor(lowercase__ , device=batch['''aatype'''].device ) , lowercase__ , np.ndarray ) __lowerCAmelCase : Tuple = tensor_tree_map(lambda lowercase__ : np.array(lowercase__ ) , make_atomaa_masks(lowercase__ ) ) return out
275
1
import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class __lowercase : def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) ->Any: '''simple docstring''' __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Union[str, Any] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : str = is_training __lowerCAmelCase : List[str] = use_token_type_ids __lowerCAmelCase : Optional[Any] = use_input_mask __lowerCAmelCase : Union[str, Any] = use_labels __lowerCAmelCase : List[Any] = use_mc_token_ids __lowerCAmelCase : Tuple = vocab_size __lowerCAmelCase : str = hidden_size __lowerCAmelCase : str = num_hidden_layers __lowerCAmelCase : int = num_attention_heads __lowerCAmelCase : Union[str, Any] = intermediate_size __lowerCAmelCase : str = hidden_act __lowerCAmelCase : Union[str, Any] = hidden_dropout_prob __lowerCAmelCase : Tuple = attention_probs_dropout_prob __lowerCAmelCase : Optional[Any] = max_position_embeddings __lowerCAmelCase : Union[str, Any] = type_vocab_size __lowerCAmelCase : List[str] = type_sequence_label_size __lowerCAmelCase : str = initializer_range __lowerCAmelCase : Any = num_labels __lowerCAmelCase : Union[str, Any] = num_choices __lowerCAmelCase : Union[str, Any] = scope __lowerCAmelCase : Optional[int] = self.vocab_size - 1 def UpperCamelCase__ ( self ) ->int: 
'''simple docstring''' __lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Any = None if self.use_input_mask: __lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Tuple = None if self.use_token_type_ids: __lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Optional[Any] = None if self.use_mc_token_ids: __lowerCAmelCase : int = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __lowerCAmelCase : Any = None __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : str = None if self.use_labels: __lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : List[str] = self.get_config() __lowerCAmelCase : int = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , *A_ ) ->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = CTRLModel(config=A_ ) model.to(A_ ) model.eval() model(A_ , token_type_ids=A_ , head_mask=A_ ) model(A_ , token_type_ids=A_ ) __lowerCAmelCase : str = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer 
) def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , *A_ ) ->Dict: '''simple docstring''' __lowerCAmelCase : Tuple = CTRLLMHeadModel(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Any = model(A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ( __lowerCAmelCase ), ) : Tuple = config_and_inputs __lowerCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , *A_ ) ->str: '''simple docstring''' __lowerCAmelCase : Any = self.num_labels __lowerCAmelCase : str = CTRLForSequenceClassification(A_ ) model.to(A_ ) model.eval() __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = model(A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class __lowercase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _UpperCamelCase = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () _UpperCamelCase = (CTRLLMHeadModel,) if is_torch_available() else () _UpperCamelCase = ( { """feature-extraction""": CTRLModel, """text-classification""": CTRLForSequenceClassification, """text-generation""": CTRLLMHeadModel, """zero-shot""": CTRLForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase = True _UpperCamelCase = False _UpperCamelCase = False def UpperCamelCase__ ( self , A_ , A_ 
, A_ , A_ , A_ ) ->Union[str, Any]: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : int = CTRLModelTester(self ) __lowerCAmelCase : str = ConfigTester(self , config_class=A_ , n_embd=37 ) def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ ( self ) ->Tuple: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*A_ ) def UpperCamelCase__ ( self ) ->int: '''simple docstring''' __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' pass @slow def UpperCamelCase__ ( self ) ->Dict: '''simple docstring''' for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Any = CTRLModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ ( self ) ->str: '''simple docstring''' pass @require_torch class __lowercase (unittest.TestCase ): def UpperCamelCase__ ( self ) ->Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() 
    @slow
    def UpperCamelCase__ ( self ) ->str:
        """Slow integration test: greedy generation from the pretrained ``ctrl`` checkpoint.

        NOTE(review): the obfuscation pass renamed every local to ``__lowerCAmelCase``
        but left the original uses behind, so ``model``, ``output_ids`` and ``A_``
        (presumably the original ``torch_device`` / ``do_sample=False`` arguments)
        are undefined here — this method raises ``NameError`` as written. Confirm
        against the upstream CTRL tests before repairing.
        """
        # Load the full pretrained LM-head model; the .to(...) call targets the test device.
        __lowerCAmelCase : Tuple = CTRLLMHeadModel.from_pretrained('''ctrl''' )
        model.to(A_ )
        # Prompt token ids for the control-code prompt below.
        __lowerCAmelCase : Dict = torch.tensor(
            [[1_1859, 0, 1611, 8]] , dtype=torch.long , device=A_ )  # Legal the president is
        # Expected greedy continuation of the prompt.
        __lowerCAmelCase : List[Any] = [
            1_1859,
            0,
            1611,
            8,
            5,
            150,
            2_6449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            2_0740,
            24_6533,
            24_6533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        # Greedy decode and compare token-for-token against the expected ids.
        __lowerCAmelCase : Union[str, Any] = model.generate(A_ , do_sample=A_ )
        self.assertListEqual(output_ids[0].tolist() , A_ )
275
def _lowercase ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) __lowerCAmelCase : int = sorted(string.lower() ) return len(lowercase__ ) == len(set(lowercase__ ) ) if __name__ == "__main__": _UpperCamelCase = input("Enter a string ").strip() _UpperCamelCase = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
275
1
# Demonstration of classical fuzzy-set operations (union, intersection,
# complement, differences, algebraic/bounded sums and products) on two
# triangular membership functions over a 0..75 universe.
#
# NOTE(review): the obfuscation pass renamed every result (X, abca, young,
# middle_aged, one, zero, union, ...) to the single name `_UpperCamelCase`,
# while the later right-hand sides still reference the original names —
# as written this script raises NameError at `fuzz.membership.trimf(X, abca)`.
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    _UpperCamelCase = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    _UpperCamelCase = [0, 25, 50]
    _UpperCamelCase = [25, 50, 75]
    # Triangular membership functions for the "young" / "middle aged" sets.
    _UpperCamelCase = fuzz.membership.trimf(X, abca)
    _UpperCamelCase = fuzz.membership.trimf(X, abca)
    # Compute the different operations using inbuilt functions.
    _UpperCamelCase = np.ones(75)
    _UpperCamelCase = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    _UpperCamelCase = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    _UpperCamelCase = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    _UpperCamelCase = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    # NOTE(review): `fuzz.fuzzy_not(...)` returns the complemented membership
    # array directly in current scikit-fuzzy; the `[1]` index here looks
    # suspect — verify against the installed skfuzzy version.
    _UpperCamelCase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    _UpperCamelCase = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    _UpperCamelCase = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    _UpperCamelCase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    _UpperCamelCase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
# Render the fuzzy sets and the eight derived operations from the section
# above on a 4x3 subplot grid.
#
# NOTE(review): X, young, middle_aged, union, intersection, complement_a,
# difference, alg_sum, alg_product, bdd_sum and bdd_difference are expected
# from the preceding computation block, but that block bound everything to
# `_UpperCamelCase` — these references are undefined as the dump stands.
from matplotlib import pyplot as plt

plt.figure()

# Source sets.
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)

plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)

# Derived operations, one panel each.
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)

plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)

plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)

plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)

plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)

plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)

plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)

plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)

plt.subplots_adjust(hspace=0.5)
plt.show()
275
# Karras et al. (2022) stochastic sampler ("KarrasVe") scheduler, in the
# diffusers SchedulerMixin style: an output dataclass plus a scheduler class
# implementing set_timesteps / add_noise_to_input / step / step_correct.
#
# NOTE(review): this row was obfuscated — every parameter is named ``A_``
# (duplicate argument names are a SyntaxError in Python) and method bodies
# reference the original names (sigma_max, sample, num_inference_steps,
# sigma_hat, KarrasVeOutput, ...). The code cannot run as written; structure
# and indentation below are reconstructed from the flattened dump.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class __lowercase (_UpperCAmelCase ):
    # Output container: previous sample, derivative, and (optionally) the
    # predicted denoised sample. Field names were stripped by obfuscation.
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = None


class __lowercase (_UpperCAmelCase , _UpperCAmelCase ):
    # Solver order: this is a second-order (Heun-style) scheduler.
    _UpperCamelCase = 2

    @register_to_config
    def __init__( self , A_ = 0.02 , A_ = 100 , A_ = 1.007 , A_ = 80 , A_ = 0.05 , A_ = 50 , ) ->int:
        """Store sigma_max as the initial noise level; timesteps/sigmas are set later.

        NOTE(review): the six defaults presumably correspond to sigma_min,
        sigma_max, s_noise, s_churn, s_min, s_max of the upstream scheduler —
        confirm ordering before un-mangling.
        """
        __lowerCAmelCase : Optional[int] = sigma_max
        # setable values
        __lowerCAmelCase : int = None
        __lowerCAmelCase : np.IntTensor = None
        __lowerCAmelCase : torch.FloatTensor = None  # sigma(t_i)

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->torch.FloatTensor:
        """Identity scaling: this scheduler does not rescale model input."""
        return sample

    def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
        """Build the reversed integer timestep schedule and the matching
        geometric sigma schedule between sigma_max and sigma_min."""
        __lowerCAmelCase : str = num_inference_steps
        __lowerCAmelCase : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __lowerCAmelCase : Optional[Any] = torch.from_numpy(A_ ).to(A_ )
        # Geometric interpolation of sigma between sigma_max and sigma_min.
        __lowerCAmelCase : Tuple = [
            (
                self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __lowerCAmelCase : Optional[int] = torch.tensor(A_ , dtype=torch.floataa , device=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ = None ) ->Tuple[torch.FloatTensor, float]:
        """Explicitly churn the sample up to a higher noise level sigma_hat.

        gamma is non-zero only when sigma lies in [s_min, s_max]; the injected
        noise has std s_noise, so sigma_hat^2 = sigma^2 + (gamma*sigma)^2 + ...
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            __lowerCAmelCase : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __lowerCAmelCase : List[str] = 0
        # sample eps ~ N(0, S_noise^2 * I)
        __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=A_ ).to(sample.device )
        __lowerCAmelCase : str = sigma + gamma * sigma
        __lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) step from sigma_hat down to sigma_prev."""
        __lowerCAmelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
        __lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
        __lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = True , ) ->Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction: average the Euler derivative with the
        derivative re-evaluated at the predicted point."""
        __lowerCAmelCase : str = sample_prev + sigma_prev * model_output
        __lowerCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
        __lowerCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=A_ , derivative=A_ , pred_original_sample=A_ )

    def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Any:
        """Training-time add_noise is intentionally unsupported for this sampler."""
        raise NotImplementedError()
275
1
# Flax/JAX fine-tuning script for BigBird on Natural Questions: a QA head
# with an extra 5-way answer-category classifier, a pmap'ed train/eval loop,
# and msgpack/joblib checkpointing.
#
# NOTE(review): this row was obfuscated — duplicate ``A_``/``lowercase__``
# parameters (a SyntaxError) and locals rebound to ``__lowerCAmelCase`` while
# the original names are still read. Indentation below is reconstructed from
# the flattened dump and from the upstream research script; it cannot run as
# written.
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class __lowercase (_UpperCAmelCase ):
    # QA module extended with a Dense(5) head that classifies the NQ answer
    # category from the pooled output.
    _UpperCamelCase = 42
    _UpperCamelCase = jnp.floataa
    _UpperCamelCase = True

    def UpperCamelCase__ ( self ) ->str:
        """Add the 5-way category classifier on top of the base QA module."""
        super().setup()
        __lowerCAmelCase : int = nn.Dense(5 , dtype=self.dtype )

    def __call__( self , *A_ , **A_ ) ->Optional[int]:
        """Return (start_logits, end_logits, category_logits)."""
        __lowerCAmelCase : Union[str, Any] = super().__call__(*A_ , **A_ )
        __lowerCAmelCase : List[str] = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)


class __lowercase (_UpperCAmelCase ):
    # Model wrapper binding the extended module above.
    _UpperCamelCase = FlaxBigBirdForNaturalQuestionsModule


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Mean of the three cross-entropy losses (start span, end span, category)."""

    def cross_entropy(lowercase__ , lowercase__ , lowercase__=None ):
        # One-hot cross entropy with optional reduction (mean here).
        __lowerCAmelCase : int = logits.shape[-1]
        __lowerCAmelCase : Union[str, Any] = (labels[..., None] == jnp.arange(lowercase__ )[None]).astype('''f4''' )
        __lowerCAmelCase : Optional[Any] = jax.nn.log_softmax(lowercase__ , axis=-1 )
        __lowerCAmelCase : Any = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            __lowerCAmelCase : Union[str, Any] = reduction(lowercase__ )
        return loss

    __lowerCAmelCase : str = partial(lowercase__ , reduction=jnp.mean )
    __lowerCAmelCase : List[str] = cross_entropy(lowercase__ , lowercase__ )
    __lowerCAmelCase : str = cross_entropy(lowercase__ , lowercase__ )
    __lowerCAmelCase : Dict = cross_entropy(lowercase__ , lowercase__ )
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class __lowercase :
    # Hyper-parameter bundle (model id, steps, batch size, optimizer args,
    # wandb/save names, data paths). Field names were stripped by obfuscation.
    _UpperCamelCase = "google/bigbird-roberta-base"
    _UpperCamelCase = 3000
    _UpperCamelCase = 10500
    _UpperCamelCase = 128
    _UpperCamelCase = 3
    _UpperCamelCase = 1
    _UpperCamelCase = 5

    # tx_args
    _UpperCamelCase = 3E-5
    _UpperCamelCase = 0.0
    _UpperCamelCase = 20000
    _UpperCamelCase = 0.0095

    _UpperCamelCase = "bigbird-roberta-natural-questions"
    _UpperCamelCase = "training-expt"
    _UpperCamelCase = "data/nq-training.jsonl"
    _UpperCamelCase = "data/nq-validation.jsonl"

    def UpperCamelCase__ ( self ) ->Dict:
        """Create the save dir and derive the global batch size across devices."""
        os.makedirs(self.base_dir , exist_ok=A_ )
        __lowerCAmelCase : Optional[Any] = os.path.join(self.base_dir , self.save_dir )
        __lowerCAmelCase : str = self.batch_size_per_device * jax.device_count()


@dataclass
class __lowercase :
    # Data collator: pads to a fixed max_length (no dynamic padding on TPUs)
    # and shards the batch across devices.
    _UpperCamelCase = 42
    _UpperCamelCase = 4096  # no dynamic padding on TPUs

    def __call__( self , A_ ) ->Tuple:
        """Collate then shard the batch across local devices."""
        __lowerCAmelCase : Union[str, Any] = self.collate_fn(A_ )
        __lowerCAmelCase : int = jax.tree_util.tree_map(A_ , A_ )
        return batch

    def UpperCamelCase__ ( self , A_ ) ->List[str]:
        """Build the padded int32 batch dict from raw feature columns."""
        __lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.fetch_inputs(features['''input_ids'''] )
        __lowerCAmelCase : Dict = {
            '''input_ids''': jnp.array(A_ , dtype=jnp.intaa ),
            '''attention_mask''': jnp.array(A_ , dtype=jnp.intaa ),
            '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
            '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
            '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
        }
        return batch

    def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]:
        """Pad every sequence and return (input_ids, attention_mask) columns."""
        __lowerCAmelCase : Any = [self._fetch_inputs(A_ ) for ids in input_ids]
        return zip(*A_ )

    def UpperCamelCase__ ( self , A_ ) ->Dict:
        """Right-pad one sequence with pad_id, mask 1 for real tokens, 0 for pad."""
        __lowerCAmelCase : List[Any] = [1 for _ in range(len(A_ ) )]
        while len(A_ ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask


def _lowercase ( lowercase__ , lowercase__ , lowercase__=None ):
    """Yield fixed-size batches from a (optionally shuffled) dataset; drops the tail."""
    if seed is not None:
        __lowerCAmelCase : List[str] = dataset.shuffle(seed=lowercase__ )
    for i in range(len(lowercase__ ) // batch_size ):
        __lowerCAmelCase : Tuple = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(lowercase__ )


@partial(jax.pmap , axis_name='''batch''' )
def _lowercase ( lowercase__ , lowercase__ , **lowercase__ ):
    """pmap'ed train step: loss + grads, pmean across devices, apply update."""

    def loss_fn(lowercase__ ):
        __lowerCAmelCase : Any = model_inputs.pop('''start_labels''' )
        __lowerCAmelCase : Tuple = model_inputs.pop('''end_labels''' )
        __lowerCAmelCase : str = model_inputs.pop('''pooled_labels''' )
        __lowerCAmelCase : Any = state.apply_fn(**lowercase__ , params=lowercase__ , dropout_rng=lowercase__ , train=lowercase__ )
        __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = outputs
        return state.loss_fn(
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , )

    __lowerCAmelCase, __lowerCAmelCase : List[str] = jax.random.split(lowercase__ )
    __lowerCAmelCase : Dict = jax.value_and_grad(lowercase__ )
    __lowerCAmelCase, __lowerCAmelCase : Tuple = grad_fn(state.params )
    __lowerCAmelCase : List[Any] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    __lowerCAmelCase : List[Any] = jax.lax.pmean(lowercase__ , '''batch''' )
    __lowerCAmelCase : Optional[int] = state.apply_gradients(grads=lowercase__ )
    return state, metrics, new_drp_rng


@partial(jax.pmap , axis_name='''batch''' )
def _lowercase ( lowercase__ , **lowercase__ ):
    """pmap'ed eval step: forward pass only, pmean'ed loss."""
    __lowerCAmelCase : int = model_inputs.pop('''start_labels''' )
    __lowerCAmelCase : List[Any] = model_inputs.pop('''end_labels''' )
    __lowerCAmelCase : str = model_inputs.pop('''pooled_labels''' )
    __lowerCAmelCase : Dict = state.apply_fn(**lowercase__ , params=state.params , train=lowercase__ )
    __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Tuple = outputs
    __lowerCAmelCase : Optional[int] = state.loss_fn(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    __lowerCAmelCase : List[str] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
    return metrics


class __lowercase (train_state.TrainState ):
    # TrainState extended with the (non-pytree) loss function.
    _UpperCamelCase = struct.field(pytree_node=_UpperCAmelCase )


@dataclass
class __lowercase :
    # Trainer: holds args, collator, step fns, scheduler and logger.
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = 42
    _UpperCamelCase = None

    def UpperCamelCase__ ( self , A_ , A_ , A_ , A_=None ) ->Optional[int]:
        """Create (and optionally restore from ckpt_dir) a replicated TrainState."""
        __lowerCAmelCase : List[str] = model.params
        __lowerCAmelCase : Optional[int] = TrainState.create(
            apply_fn=model.__call__ , params=A_ , tx=A_ , loss_fn=A_ , )
        if ckpt_dir is not None:
            # Rebuild optimizer/schedule from the restored args so the step
            # count and opt_state stay consistent with the checkpoint.
            __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = restore_checkpoint(A_ , A_ )
            __lowerCAmelCase : str = {
                '''lr''': args.lr,
                '''init_lr''': args.init_lr,
                '''warmup_steps''': args.warmup_steps,
                '''num_train_steps''': num_train_steps,
                '''weight_decay''': args.weight_decay,
            }
            __lowerCAmelCase, __lowerCAmelCase : Tuple = build_tx(**A_ )
            __lowerCAmelCase : Optional[Any] = train_state.TrainState(
                step=A_ , apply_fn=model.__call__ , params=A_ , tx=A_ , opt_state=A_ , )
            __lowerCAmelCase : str = args
            __lowerCAmelCase : Tuple = data_collator
            __lowerCAmelCase : Union[str, Any] = lr
            __lowerCAmelCase : Dict = params
        # Replicate across local devices for pmap.
        __lowerCAmelCase : Any = jax_utils.replicate(A_ )
        return state

    def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->List[Any]:
        """Epoch loop: train steps, periodic logging/eval, periodic checkpoints."""
        __lowerCAmelCase : str = self.args
        __lowerCAmelCase : Dict = len(A_ ) // args.batch_size
        __lowerCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
        __lowerCAmelCase : Union[str, Any] = jax.random.split(A_ , jax.device_count() )
        for epoch in range(args.max_epochs ):
            __lowerCAmelCase : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
            __lowerCAmelCase : List[Any] = get_batched_dataset(A_ , args.batch_size , seed=A_ )
            __lowerCAmelCase : Dict = 0
            for batch in tqdm(A_ , total=A_ , desc=f"""Running EPOCH-{epoch}""" ):
                __lowerCAmelCase : Optional[int] = self.data_collator(A_ )
                __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : List[str] = self.train_step_fn(A_ , A_ , **A_ )
                running_loss += jax_utils.unreplicate(metrics['''loss'''] )
                i += 1
                if i % args.logging_steps == 0:
                    __lowerCAmelCase : str = jax_utils.unreplicate(state.step )
                    __lowerCAmelCase : Any = running_loss.item() / i
                    __lowerCAmelCase : str = self.scheduler_fn(state_step - 1 )
                    __lowerCAmelCase : Union[str, Any] = self.evaluate(A_ , A_ )
                    __lowerCAmelCase : int = {
                        '''step''': state_step.item(),
                        '''eval_loss''': eval_loss.item(),
                        '''tr_loss''': tr_loss,
                        '''lr''': lr.item(),
                    }
                    tqdm.write(str(A_ ) )
                    self.logger.log(A_ , commit=A_ )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=A_ )

    def UpperCamelCase__ ( self , A_ , A_ ) ->Optional[Any]:
        """Run the pmap'ed eval step over the validation set; return mean loss."""
        __lowerCAmelCase : Dict = get_batched_dataset(A_ , self.args.batch_size )
        __lowerCAmelCase : Dict = len(A_ ) // self.args.batch_size
        __lowerCAmelCase : Any = jnp.array(0 , dtype=jnp.floataa )
        __lowerCAmelCase : Optional[Any] = 0
        for batch in tqdm(A_ , total=A_ , desc='''Evaluating ... ''' ):
            __lowerCAmelCase : Optional[int] = self.data_collator(A_ )
            __lowerCAmelCase : int = self.val_step_fn(A_ , **A_ )
            running_loss += jax_utils.unreplicate(metrics['''loss'''] )
            i += 1
        return running_loss / i

    def UpperCamelCase__ ( self , A_ , A_ ) ->str:
        """Unreplicate and persist model params, opt_state, args, collator, step."""
        __lowerCAmelCase : Union[str, Any] = jax_utils.unreplicate(A_ )
        print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
        self.model_save_fn(A_ , params=state.params )
        with open(os.path.join(A_ , '''opt_state.msgpack''' ) , '''wb''' ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(A_ , '''args.joblib''' ) )
        joblib.dump(self.data_collator , os.path.join(A_ , '''data_collator.joblib''' ) )
        with open(os.path.join(A_ , '''training_state.json''' ) , '''w''' ) as f:
            json.dump({'''step''': state.step.item()} , A_ )
        print('''DONE''' )


def _lowercase ( lowercase__ , lowercase__ ):
    """Inverse of save_checkpoint: load params/opt_state/step/args/collator."""
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=''' ... ''' )
    with open(os.path.join(lowercase__ , '''flax_model.msgpack''' ) , '''rb''' ) as f:
        __lowerCAmelCase : str = from_bytes(state.params , f.read() )
    with open(os.path.join(lowercase__ , '''opt_state.msgpack''' ) , '''rb''' ) as f:
        __lowerCAmelCase : Optional[Any] = from_bytes(state.opt_state , f.read() )
    __lowerCAmelCase : Any = joblib.load(os.path.join(lowercase__ , '''args.joblib''' ) )
    __lowerCAmelCase : List[Any] = joblib.load(os.path.join(lowercase__ , '''data_collator.joblib''' ) )
    with open(os.path.join(lowercase__ , '''training_state.json''' ) , '''r''' ) as f:
        __lowerCAmelCase : int = json.load(lowercase__ )
    __lowerCAmelCase : List[Any] = training_state['''step''']
    print('''DONE''' )
    return params, opt_state, step, args, data_collator


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Linear warmup to lr, then linear decay to 1e-7, joined at warmup_steps."""
    __lowerCAmelCase : Optional[int] = num_train_steps - warmup_steps
    __lowerCAmelCase : List[Any] = optax.linear_schedule(init_value=lowercase__ , end_value=lowercase__ , transition_steps=lowercase__ )
    __lowerCAmelCase : int = optax.linear_schedule(init_value=lowercase__ , end_value=1E-7 , transition_steps=lowercase__ )
    __lowerCAmelCase : str = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Build the AdamW optimizer (bias/LayerNorm excluded from weight decay)."""

    def weight_decay_mask(lowercase__ ):
        __lowerCAmelCase : Optional[Any] = traverse_util.flatten_dict(lowercase__ )
        __lowerCAmelCase : str = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
        return traverse_util.unflatten_dict(lowercase__ )

    __lowerCAmelCase : Optional[int] = scheduler_fn(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
    __lowerCAmelCase : List[str] = optax.adamw(learning_rate=lowercase__ , weight_decay=lowercase__ , mask=lowercase__ )
    return tx, lr
275
# Data collator for LUKE-style token classification: pads token labels,
# ner_tags and original_entity_spans to a common sequence length.
#
# NOTE(review): obfuscation renamed all parameters to ``lowercase__``/``A_``
# and locals to ``__lowerCAmelCase`` while the bodies still read the original
# names (sequence_length, padding_side, out_tensor, cp, cat, features,
# labels, batch). Both helpers below share the name ``_lowercase`` — the
# second shadows the first at module level.
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
    """Pad/truncate a list of per-example tensors to sequence_length.

    2-D span labels get a trailing pair dimension; padding_side selects
    left vs right truncation. Returns a plain Python list.
    """
    if isinstance(lowercase__ , lowercase__ ):
        # Span case: each entry is a (length, 2) pair array.
        __lowerCAmelCase : Dict = np.full((len(lowercase__ ), sequence_length, 2) , lowercase__ )
    else:
        __lowerCAmelCase : Optional[int] = np.full((len(lowercase__ ), sequence_length) , lowercase__ )
    for i, tensor in enumerate(lowercase__ ):
        if padding_side == "right":
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : int = tensor[:sequence_length]
        else:
            if isinstance(lowercase__ , lowercase__ ):
                __lowerCAmelCase : Union[str, Any] = tensor[:sequence_length]
            else:
                __lowerCAmelCase : Optional[Any] = tensor[:sequence_length]
    return out_tensor.tolist()


def _lowercase ( lowercase__ ):
    """Return True if the character is punctuation (ASCII ranges or Unicode 'P*')."""
    __lowerCAmelCase : Union[str, Any] = ord(lowercase__ )
    # ASCII punctuation blocks: !-/, :-@, [-`, {-~.
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    __lowerCAmelCase : int = unicodedata.category(lowercase__ )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class __lowercase (_UpperCAmelCase ):
    # Collator config: tokenizer, padding strategy, max_length,
    # pad_to_multiple_of, label_pad_token_id (-100 = ignored by the loss),
    # and the framework of the returned tensors.
    _UpperCamelCase = 42
    _UpperCamelCase = True
    _UpperCamelCase = None
    _UpperCamelCase = None
    _UpperCamelCase = -100
    _UpperCamelCase = "pt"

    def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
        """Tokenize-pad the batch, then pad labels / ner_tags / entity spans
        to the entity sequence length and convert everything to torch tensors."""
        import torch

        __lowerCAmelCase : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
        __lowerCAmelCase : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        __lowerCAmelCase : List[Any] = self.tokenizer.pad(
            A_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        # Pad label lists to the entity_ids length on the tokenizer's side.
        __lowerCAmelCase : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1]
        __lowerCAmelCase : Optional[int] = self.tokenizer.padding_side
        if padding_side == "right":
            __lowerCAmelCase : Any = [
                list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels
            ]
        else:
            __lowerCAmelCase : Optional[int] = [
                [self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels
            ]
        __lowerCAmelCase : Tuple = [feature['''ner_tags'''] for feature in features]
        __lowerCAmelCase : List[Any] = padding_tensor(A_ , -1 , A_ , A_ )
        __lowerCAmelCase : Optional[int] = [feature['''original_entity_spans'''] for feature in features]
        __lowerCAmelCase : Any = padding_tensor(A_ , (-1, -1) , A_ , A_ )
        # NOTE(review): ``torch.intaa`` is a mangled dtype name (presumably int64).
        __lowerCAmelCase : Optional[Any] = {k: torch.tensor(A_ , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
275
1
# Tests for RagTokenizer: save/load round-trip of the composite
# (DPR question-encoder + BART generator) tokenizer, plus slow batch-encode
# smoke tests against the pretrained RAG checkpoints.
#
# NOTE(review): obfuscated row — locals are bound to ``__lowerCAmelCase``
# while later code reads the original names (self.tmpdirname, vocab_tokens,
# self.vocab_file, self.merges_file, A_); it cannot run as written.
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class __lowercase (_UpperCAmelCase ):
    def UpperCamelCase__ ( self ) ->str:
        """setUp: write tiny DPR (WordPiece) and BART (BPE) vocab files to a tmp dir."""
        __lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
        __lowerCAmelCase : Dict = 8
        # DPR tok
        __lowerCAmelCase : Union[str, Any] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __lowerCAmelCase : Dict = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(A_ , exist_ok=A_ )
        __lowerCAmelCase : List[str] = os.path.join(A_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        __lowerCAmelCase : int = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        __lowerCAmelCase : List[str] = dict(zip(A_ , range(len(A_ ) ) ) )
        __lowerCAmelCase : Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __lowerCAmelCase : Union[str, Any] = {'''unk_token''': '''<unk>'''}
        __lowerCAmelCase : int = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(A_ , exist_ok=A_ )
        __lowerCAmelCase : Optional[Any] = os.path.join(A_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowerCAmelCase : Tuple = os.path.join(A_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A_ ) )

    def UpperCamelCase__ ( self ) ->DPRQuestionEncoderTokenizer:
        """Load the toy DPR question-encoder tokenizer written by setUp."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def UpperCamelCase__ ( self ) ->BartTokenizer:
        """Load the toy BART tokenizer written by setUp."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """tearDown: remove the temporary tokenizer directory."""
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        """save_pretrained/from_pretrained round-trip preserves both sub-tokenizers."""
        __lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
        __lowerCAmelCase : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        __lowerCAmelCase : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(A_ )
        rag_tokenizer.save_pretrained(A_ )
        __lowerCAmelCase : Dict = RagTokenizer.from_pretrained(A_ , config=A_ )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , A_ )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , A_ )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def UpperCamelCase__ ( self ) ->List[Any]:
        """Smoke test: batch-encode NQ-style questions with facebook/rag-token-nq."""
        __lowerCAmelCase : Optional[int] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
        __lowerCAmelCase : Optional[Any] = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        __lowerCAmelCase : Union[str, Any] = tokenizer(A_ )
        self.assertIsNotNone(A_ )

    @slow
    def UpperCamelCase__ ( self ) ->Tuple:
        """Smoke test: batch-encode the same questions with facebook/rag-sequence-nq."""
        __lowerCAmelCase : Union[str, Any] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
        __lowerCAmelCase : Union[str, Any] = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        __lowerCAmelCase : List[str] = tokenizer(A_ )
        self.assertIsNotNone(A_ )
275
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


# NOTE(review): this module appears machine-obfuscated.  Every local was renamed to
# ``__lowerCAmelCase`` (so each assignment shadows the previous one and its value is
# lost), most argument reads became ``A_``, and every method was renamed to
# ``UpperCamelCase__`` (later defs shadow earlier ones, so only the last method of the
# class survives).  Names read below such as ``processor``, ``tokenizer``,
# ``self.tmpdirname`` and bare ``A_`` are therefore undefined at runtime — restore the
# upstream transformers MgpstrProcessor test before executing this file.
@require_torch
@require_vision
class __lowercase(unittest.TestCase):
    # Image-processor class under test (None when the vision extras are missing).
    _UpperCamelCase = ViTImageProcessor if is_vision_available() else None

    @property
    def UpperCamelCase__(self) -> str:
        """Proxy to the image-processor tester's config dict.

        NOTE(review): ``self.image_processor_tester`` is never assigned anywhere in
        this class — verify against the upstream test.
        """
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__(self) -> str:
        """setUp-style fixture: write a character vocab file and a ViT image-processor
        config into a fresh temp dir so tokenizer/processor can be loaded from disk."""
        __lowerCAmelCase: Tuple = (3, 32, 128)
        __lowerCAmelCase: List[str] = tempfile.mkdtemp()
        # fmt: off
        __lowerCAmelCase: List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        # NOTE(review): ``A_`` is undefined in this method — presumably the vocab list above.
        __lowerCAmelCase: Optional[int] = dict(zip(A_, range(len(A_))))
        __lowerCAmelCase: Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(A_) + '''\n''')
        __lowerCAmelCase: Union[str, Any] = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        __lowerCAmelCase: Optional[Any] = os.path.join(self.tmpdirname, A_)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(A_, A_)

    def UpperCamelCase__(self, **A_) -> MgpstrTokenizer:
        """Load the character tokenizer saved in the temp dir, forwarding kwargs."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **A_)

    def UpperCamelCase__(self, **A_) -> ViTImageProcessor:
        """Load the image processor saved in the temp dir, forwarding kwargs."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **A_)

    def UpperCamelCase__(self) -> None:
        """tearDown-style fixture: remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname)

    def UpperCamelCase__(self) -> "Image.Image":
        """Build one random channels-first array and convert it to a PIL RGB image."""
        # NOTE(review): ``np.uinta`` is an obfuscation artifact — presumably np.uint8.
        __lowerCAmelCase: Tuple = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)
        __lowerCAmelCase: str = Image.fromarray(np.moveaxis(A_, 0, -1))
        return image_input  # NOTE(review): undefined — presumably the image built above

    def UpperCamelCase__(self) -> None:
        """Round-trip: save_pretrained/from_pretrained preserves both sub-components."""
        __lowerCAmelCase: Dict = self.get_tokenizer()
        __lowerCAmelCase: List[Any] = self.get_image_processor()
        __lowerCAmelCase: List[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        processor.save_pretrained(self.tmpdirname)
        __lowerCAmelCase: Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=A_)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, A_)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, A_)

    def UpperCamelCase__(self) -> None:
        """from_pretrained applies extra tokenizer/image-processor kwargs on reload."""
        __lowerCAmelCase: Any = self.get_tokenizer()
        __lowerCAmelCase: Union[str, Any] = self.get_image_processor()
        __lowerCAmelCase: List[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        processor.save_pretrained(self.tmpdirname)
        __lowerCAmelCase: List[Any] = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        __lowerCAmelCase: int = self.get_image_processor(do_normalize=A_, padding_value=1.0)
        __lowerCAmelCase: int = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=A_, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, A_)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, A_)

    def UpperCamelCase__(self) -> None:
        """processor(images=...) output matches the bare image processor's output."""
        __lowerCAmelCase: Any = self.get_image_processor()
        __lowerCAmelCase: Optional[Any] = self.get_tokenizer()
        __lowerCAmelCase: int = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase: Optional[int] = self.prepare_image_inputs()
        __lowerCAmelCase: Optional[Any] = image_processor(A_, return_tensors='''np''')
        __lowerCAmelCase: Tuple = processor(images=A_, return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def UpperCamelCase__(self) -> None:
        """processor(text=...) output matches the bare tokenizer's output."""
        __lowerCAmelCase: str = self.get_image_processor()
        __lowerCAmelCase: Union[str, Any] = self.get_tokenizer()
        __lowerCAmelCase: Optional[Any] = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase: Any = '''test'''
        __lowerCAmelCase: Dict = processor(text=A_)
        __lowerCAmelCase: str = tokenizer(A_)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def UpperCamelCase__(self) -> None:
        """With both text and images the processor returns pixel_values + labels;
        calling it with no input must raise."""
        __lowerCAmelCase: Dict = self.get_image_processor()
        __lowerCAmelCase: Any = self.get_tokenizer()
        __lowerCAmelCase: str = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase: List[Any] = '''test'''
        __lowerCAmelCase: int = self.prepare_image_inputs()
        __lowerCAmelCase: int = processor(text=A_, images=A_)
        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''labels'''])
        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def UpperCamelCase__(self) -> None:
        """char_decode equals batch_decode with spaces stripped out."""
        __lowerCAmelCase: List[Any] = self.get_image_processor()
        __lowerCAmelCase: int = self.get_tokenizer()
        __lowerCAmelCase: Any = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase: List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase: Optional[int] = processor.char_decode(A_)
        __lowerCAmelCase: Tuple = tokenizer.batch_decode(A_)
        __lowerCAmelCase: Any = [seq.replace(''' ''', '''''') for seq in decoded_tok]
        self.assertListEqual(A_, A_)

    def UpperCamelCase__(self) -> None:
        """With text=None the processor returns exactly its model_input_names keys."""
        __lowerCAmelCase: str = self.get_image_processor()
        __lowerCAmelCase: Any = self.get_tokenizer()
        __lowerCAmelCase: int = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        __lowerCAmelCase: Union[str, Any] = None
        __lowerCAmelCase: Optional[Any] = self.prepare_image_inputs()
        __lowerCAmelCase: List[Any] = processor(text=A_, images=A_)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def UpperCamelCase__(self) -> None:
        """batch_decode over char/BPE/wordpiece logits returns the full result dict."""
        __lowerCAmelCase: List[str] = self.get_image_processor()
        __lowerCAmelCase: List[str] = self.get_tokenizer()
        __lowerCAmelCase: Any = MgpstrProcessor(tokenizer=A_, image_processor=A_)
        # Logit widths: 38 = char vocab, 50257 = GPT-2 BPE vocab, 30522 = BERT wordpiece vocab.
        __lowerCAmelCase: List[Any] = torch.randn(1, 27, 38)
        __lowerCAmelCase: Optional[int] = torch.randn(1, 27, 5_0257)
        __lowerCAmelCase: Optional[Any] = torch.randn(1, 27, 3_0522)
        __lowerCAmelCase: List[str] = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds''']
        )
275
1
def _lowercase ( lowercase__ ): if not isinstance(lowercase__ , lowercase__ ): __lowerCAmelCase : List[Any] = f"""Input value of [number={number}] must be an integer""" raise TypeError(lowercase__ ) if number < 0: return False __lowerCAmelCase : Optional[int] = number * number while number > 0: if number % 1_0 != number_square % 1_0: return False number //= 1_0 number_square //= 1_0 return True if __name__ == "__main__": import doctest doctest.testmod()
275
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


# NOTE(review): this module appears machine-obfuscated: locals were renamed to
# ``__lowerCAmelCase`` (each assignment shadows the previous one) and argument reads
# became ``A_``.  Names such as ``model``, ``pndm``, ``image`` and ``A_`` below are
# therefore undefined at runtime — restore the upstream diffusers PNDM pipeline test
# before executing this file.
class __lowercase(unittest.TestCase):
    @property
    def UpperCamelCase__(self) -> UNetaDModel:
        """Build a small, deterministically-seeded UNet for fast CPU tests."""
        torch.manual_seed(0)
        __lowerCAmelCase: List[Any] = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model  # NOTE(review): undefined — presumably the model constructed above

    def UpperCamelCase__(self) -> None:
        """Run the PNDM pipeline on the dummy UNet; check output shape, a reference
        slice of pixel values, and that return_dict=False matches the default path."""
        __lowerCAmelCase: List[str] = self.dummy_uncond_unet
        __lowerCAmelCase: Any = PNDMScheduler()
        __lowerCAmelCase: Dict = PNDMPipeline(unet=A_, scheduler=A_)
        pndm.to(A_)
        pndm.set_progress_bar_config(disable=A_)
        # Same seed for both invocations so their outputs are comparable.
        __lowerCAmelCase: Optional[Any] = torch.manual_seed(0)
        __lowerCAmelCase: Any = pndm(generator=A_, num_inference_steps=20, output_type='''numpy''').images
        __lowerCAmelCase: Optional[Any] = torch.manual_seed(0)
        __lowerCAmelCase: List[Any] = pndm(generator=A_, num_inference_steps=20, output_type='''numpy''', return_dict=A_)[0]
        __lowerCAmelCase: Tuple = image[0, -3:, -3:, -1]
        __lowerCAmelCase: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase: int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class __lowercase(unittest.TestCase):  # NOTE(review): same obfuscated name shadows the fast-test class above
    def UpperCamelCase__(self) -> None:
        """Slow test: run the full PNDM pipeline with pretrained
        google/ddpm-cifar10-32 weights and compare against reference pixel values."""
        __lowerCAmelCase: Optional[int] = '''google/ddpm-cifar10-32'''
        __lowerCAmelCase: Union[str, Any] = UNetaDModel.from_pretrained(A_)
        __lowerCAmelCase: int = PNDMScheduler()
        __lowerCAmelCase: Any = PNDMPipeline(unet=A_, scheduler=A_)
        pndm.to(A_)
        pndm.set_progress_bar_config(disable=A_)
        __lowerCAmelCase: Tuple = torch.manual_seed(0)
        __lowerCAmelCase: Any = pndm(generator=A_, output_type='''numpy''').images
        __lowerCAmelCase: Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase: List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
275
1
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class __lowercase(unittest.TestCase):
    """CPU-only smoke tests that launch the accelerate integration scripts in-process."""

    def UpperCamelCase__(self) -> None:
        """Launch the main accelerate test script through debug_launcher."""
        debug_launcher(test_script.main)

    # NOTE(review): this method has the same (obfuscated) name as the one above, so it
    # shadows it — presumably these were two distinct test_* methods originally.
    def UpperCamelCase__(self) -> None:
        """Launch the accelerate ops test script through debug_launcher."""
        debug_launcher(test_ops.main)
275
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution.  The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))

# Bug fix note: the previous revision named every function ``_lowercase`` (each def
# shadowing the last) and every constant ``_UpperCamelCase``, while the bodies still
# referred to ``evaluate``/``crossover``/``mutate``/``select``/``basic`` and the
# N_* constants — so every call raised NameError.  Canonical names are restored here.


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score *item* against *main_target*: one point per position-wise matching gene."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails, producing two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of *child*."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # randint(0, len) - 1 yields an index in [-1, len(child) - 1]; -1 wraps to
        # the last gene, so every position can mutate.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed *parent_1* with random mates; fitter parents get more offspring."""
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10).
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append mutated children to the next population.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward *target* using the gene pool *genes*.

    :param target: string the evolution tries to reproduce
    :param genes: characters the algorithm may use
    :param debug: print a progress report every 10 generations
    :raises ValueError: if N_POPULATION < N_SELECTED, or *target* contains
        characters missing from *genes* (evolution could never converge)
    :return: (generation reached, total strings evaluated, best string found)
    """
    # Verify if N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Evaluate every item of the current population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know it is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # Selection: breed the N_SELECTED best parents.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached its maximum value; without
            # this check large targets would grow the population unboundedly.
            if len(population) > N_POPULATION:
                break


# Backward-compatible alias: the previous revision exposed the (last-defined)
# entry point under this obfuscated name.
_lowercase = basic


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
275
1
def _lowercase ( lowercase__ = 1_0_0_0 ): __lowerCAmelCase : str = 2**power __lowerCAmelCase : Any = str(lowercase__ ) __lowerCAmelCase : Optional[int] = list(lowercase__ ) __lowerCAmelCase : List[Any] = 0 for i in list_num: sum_of_num += int(lowercase__ ) return sum_of_num if __name__ == "__main__": _UpperCamelCase = int(input("Enter the power of 2: ").strip()) print("2 ^ ", power, " = ", 2**power) _UpperCamelCase = solution(power) print("Sum of the digits is: ", result)
275
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# NOTE(review): this module appears machine-obfuscated.  Every module-level constant
# below is named ``_UpperCamelCase`` (each assignment shadows the previous one), so the
# names the code actually reads (``logger``, ``VOCAB_FILES_NAMES``, ...) are undefined,
# the base class ``_UpperCAmelCase`` does not exist, and several ``def`` headers repeat
# the parameter name ``A_`` — which is a SyntaxError.  Restore the upstream GPT-SW3
# tokenizer source before executing this file.
_UpperCamelCase = logging.get_logger(__name__)  # presumably ``logger``

_UpperCamelCase = {"vocab_file": "spiece.model"}  # presumably ``VOCAB_FILES_NAMES``

# presumably ``PRETRAINED_VOCAB_FILES_MAP``
_UpperCamelCase = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# presumably ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``
_UpperCamelCase = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class __lowercase(_UpperCAmelCase):  # NOTE(review): base presumably PreTrainedTokenizer
    """SentencePiece-based tokenizer for the GPT-SW3 family of models."""

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = ["""input_ids""", """attention_mask"""]

    # NOTE(review): duplicate ``A_`` parameters — SyntaxError; upstream has distinct
    # names (vocab_file, do_lower_case, remove_space, keep_accents, pad/unk/eos/bos
    # tokens, sp_model_kwargs).
    def __init__(self, A_, A_=False, A_=False, A_=False, A_=None, A_=None, A_=None, A_=None, A_=None, **A_) -> None:
        """Load the SentencePiece model and pick special-token defaults by model name."""
        __lowerCAmelCase: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        __lowerCAmelCase: int = kwargs.get('''name_or_path''')
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored'''
            )
            __lowerCAmelCase: Union[str, Any] = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        __lowerCAmelCase: str = '''<|endoftext|>''' if eos_token is None else eos_token
        __lowerCAmelCase: Any = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint has no dedicated pad/bos pieces — fall back to unk/eos.
            __lowerCAmelCase: Dict = unk_token if pad_token is None else pad_token
            __lowerCAmelCase: int = eos_token if bos_token is None else bos_token
        else:
            __lowerCAmelCase: Optional[int] = '''<pad>''' if pad_token is None else pad_token
            __lowerCAmelCase: List[str] = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=A_,
            remove_space=A_,
            keep_accents=A_,
            bos_token=A_,
            eos_token=A_,
            unk_token=A_,
            pad_token=A_,
            sp_model_kwargs=self.sp_model_kwargs,
            **A_,
        )
        __lowerCAmelCase: Union[str, Any] = do_lower_case
        __lowerCAmelCase: Union[str, Any] = remove_space
        __lowerCAmelCase: int = keep_accents
        __lowerCAmelCase: Union[str, Any] = vocab_file
        __lowerCAmelCase: int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(A_)
        # Used for whitespace normalization in input texts
        # fmt : off
        # NOTE(review): upstream entries are distinct Unicode space codepoints (NBSP,
        # thin space, zero-width space, ...) — verify the exact codepoints against the
        # original source; they may have been flattened by the obfuscation.
        __lowerCAmelCase: List[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        __lowerCAmelCase: int = re.compile(
            f"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]"""
        )

    def __getstate__(self) -> Dict:
        """Drop the unpicklable SentencePiece processor before pickling."""
        __lowerCAmelCase: Union[str, Any] = self.__dict__.copy()
        __lowerCAmelCase: List[Any] = None
        return state  # NOTE(review): undefined — presumably the copied dict above

    def __setstate__(self, A_) -> Tuple:
        """Restore state and re-load the SentencePiece model from the vocab file."""
        __lowerCAmelCase: int = d  # NOTE(review): 'd' undefined — presumably the A_ argument
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            __lowerCAmelCase: List[Any] = {}
        __lowerCAmelCase: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCamelCase__(self) -> int:
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model)

    def UpperCamelCase__(self, A_) -> str:
        """Preprocess text: strip non-printing chars, unify whitespace variants to a
        plain space, then apply NFC Unicode normalization."""
        __lowerCAmelCase: int = self.non_printing_characters_re.sub('''''', A_)
        # Normalize whitespaces
        __lowerCAmelCase: List[str] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text])
        # NFC Unicode normalization
        __lowerCAmelCase: Tuple = unicodedata.normalize('''NFC''', A_)
        return text

    # NOTE(review): duplicate ``A_`` / ``**A_`` parameters — SyntaxError in upstream-restored form this is _tokenize(text, **kwargs).
    def UpperCamelCase__(self, A_, **A_) -> List[str]:
        """Tokenize: preprocess the text then SentencePiece-encode it into string pieces."""
        __lowerCAmelCase: int = self.preprocess_text(A_)
        return self.sp_model.encode(A_, out_type=A_)

    def UpperCamelCase__(self, A_) -> int:
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(A_)

    def UpperCamelCase__(self, A_) -> str:
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(A_)

    @staticmethod
    def UpperCamelCase__(A_) -> str:
        """Return the already-decoded string unchanged (identity clean-up hook)."""
        return out_string  # NOTE(review): undefined — presumably the A_ argument

    def UpperCamelCase__(self, A_) -> str:
        """Join tokens back into a string, decoding non-special spans with the
        SentencePiece model and passing special tokens through verbatim."""
        __lowerCAmelCase: str = []
        __lowerCAmelCase: Tuple = ''''''
        __lowerCAmelCase: int = False
        for token in tokens:  # NOTE(review): 'tokens' undefined — presumably A_
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A_) + token
                __lowerCAmelCase: Optional[Any] = True
                __lowerCAmelCase: Optional[int] = []
            else:
                current_sub_tokens.append(A_)
                __lowerCAmelCase: str = False
        out_string += self.sp_model.decode(A_)
        return out_string

    def UpperCamelCase__(self) -> Dict[str, int]:
        """Return the token->id mapping, including user-added tokens."""
        __lowerCAmelCase: str = {self.convert_ids_to_tokens(A_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)  # NOTE(review): 'vocab' undefined
        return vocab

    # NOTE(review): duplicate ``A_`` parameters — upstream this is
    # save_vocabulary(save_directory, filename_prefix=None).
    def UpperCamelCase__(self, A_, A_=None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(A_):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        __lowerCAmelCase: Any = os.path.join(
            A_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(A_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, A_)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk — write the serialized model proto instead.
            with open(A_, '''wb''') as fi:
                __lowerCAmelCase: Dict = self.sp_model.serialized_model_proto()
                fi.write(A_)
        return (out_vocab_file,)

    # NOTE(review): duplicate ``A_`` parameters — upstream this is
    # encode_fast(text, return_tensors=False).
    def UpperCamelCase__(self, A_, A_=False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Fast encode: preprocess + SentencePiece-encode a string or list of strings,
        optionally returning a torch tensor."""
        if isinstance(A_, A_):
            __lowerCAmelCase: Optional[Any] = self.preprocess_text(A_)
            __lowerCAmelCase: Dict = self.sp_model.encode(A_)
        else:
            __lowerCAmelCase: Dict = [self.preprocess_text(A_) for t in text]
            __lowerCAmelCase: Optional[int] = self.sp_model.encode(A_)
        if return_tensors is True or return_tensors == "pt":
            __lowerCAmelCase: Tuple = torch.tensor(A_)
        return token_ids  # NOTE(review): undefined — presumably the encoded ids above

    def UpperCamelCase__(self, A_) -> str:
        """Fast decode: turn token ids straight back into text with SentencePiece."""
        return self.sp_model.decode(A_)

    def UpperCamelCase__(self, A_) -> List[int]:
        """Serialize a Conversation into prompt ids using the "User:"/"Bot:" format."""
        __lowerCAmelCase: int = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        __lowerCAmelCase: Any = (
            f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(A_) + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=A_)
275
1