code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import os import sys import unittest _UpperCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCAmelCase : Dict = os.path.join(git_repo_path, """src""", """diffusers""") class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = find_backend(' if not is_torch_available():' ) self.assertEqual(snake_case , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case_ = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(snake_case , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case_ = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(snake_case , 'torch_and_transformers_and_onnx' ) def a ( self ): snake_case_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , snake_case ) self.assertIn('torch_and_transformers' , snake_case ) self.assertIn('flax_and_transformers' , snake_case ) self.assertIn('torch_and_transformers_and_onnx' , snake_case ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def a ( self ): snake_case_ = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(snake_case , '\nCONSTANT = None\n' ) snake_case_ = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( snake_case , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) snake_case_ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' snake_case_ = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(snake_case , snake_case ) def a ( self ): snake_case_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' snake_case_ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , snake_case )
285
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
1
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
1
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : def __init__( self , snake_case , snake_case=3 , snake_case=32 , snake_case=3 , snake_case=10 , snake_case=[8, 16, 32, 64] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , snake_case=["stage2", "stage3", "stage4"] , snake_case=[2, 3, 4] , snake_case=1 , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = num_channels snake_case_ = embeddings_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_act snake_case_ = num_labels snake_case_ = scope snake_case_ = len(snake_case ) snake_case_ = out_features snake_case_ = out_indices snake_case_ = num_groups def a ( self ): snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def a ( self ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = BitModel(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = self.num_labels snake_case_ = BitForImageClassification(snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = BitBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None snake_case_ = None snake_case_ = BitBackbone(config=snake_case ) model.to(snake_case ) model.eval() snake_case_ = model(snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a ( self ): snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( lowercase_ , lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __SCREAMING_SNAKE_CASE : List[str] = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : List[str] = False def a ( self ): snake_case_ = BitModelTester(self ) snake_case_ = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def a ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a ( self ): return @unittest.skip(reason='Bit does not output attentions' ) def a ( self ): pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def a ( self ): pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def a ( self ): pass def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(snake_case ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*snake_case ) def a ( self ): snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(config=snake_case ) for name, module in model.named_modules(): if isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def a ( self ): def check_hidden_states_output(snake_case , snake_case , snake_case ): snake_case_ = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(snake_case , snake_case ) ) snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ = self.model_tester.num_stages self.assertEqual(len(snake_case ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case_ = layer_type snake_case_ = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(snake_case , snake_case , snake_case ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def a ( self ): pass def a ( self ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def a ( self ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = BitModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): @cached_property def a ( self ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a ( self ): snake_case_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): snake_case_ = model(**snake_case ) # verify the logits snake_case_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) ) @require_torch class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Dict = (BitBackbone,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[int] = BitConfig __SCREAMING_SNAKE_CASE : List[Any] = False def a ( self ): snake_case_ = BitModelTester(self )
285
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=UpperCamelCase__ ) snake_case_ = parser.add_subparsers(help='accelerate command helpers' ) # Register commands get_config_parser(subparsers=UpperCamelCase__ ) env_command_parser(subparsers=UpperCamelCase__ ) launch_command_parser(subparsers=UpperCamelCase__ ) tpu_command_parser(subparsers=UpperCamelCase__ ) test_command_parser(subparsers=UpperCamelCase__ ) # Let's go snake_case_ = parser.parse_args() if not hasattr(UpperCamelCase__ , 'func' ): parser.print_help() exit(1 ) # Run args.func(UpperCamelCase__ ) if __name__ == "__main__": main()
285
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
1
from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : List[Any] = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])} """ _UpperCAmelCase : Optional[int] = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False , ): '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): snake_case_ = new_id # turn into Numpy arrays snake_case_ = np.array(UpperCamelCase__ ) snake_case_ = np.array(UpperCamelCase__ ) if reduce_labels: snake_case_ = 255 snake_case_ = label - 1 snake_case_ = 255 snake_case_ = label != ignore_index snake_case_ = np.not_equal(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = pred_label[mask] snake_case_ = np.array(UpperCamelCase__ )[mask] snake_case_ = pred_label[pred_label == label] snake_case_ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] snake_case_ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] snake_case_ = np.histogram(UpperCamelCase__ , bins=UpperCamelCase__ , range=(0, num_labels - 1) )[0] snake_case_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False , ): '''simple docstring''' snake_case_ = np.zeros((num_labels,) , dtype=np.floataa ) snake_case_ = np.zeros((num_labels,) , dtype=np.floataa ) snake_case_ = np.zeros((num_labels,) , dtype=np.floataa ) snake_case_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ , snake_case_ , snake_case_ , snake_case_ = intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , ): '''simple docstring''' snake_case_ , snake_case_ , snake_case_ , snake_case_ = total_intersect_and_union( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # compute metrics snake_case_ = {} snake_case_ = total_area_intersect.sum() / total_area_label.sum() snake_case_ = total_area_intersect / total_area_union snake_case_ = total_area_intersect / total_area_label snake_case_ = np.nanmean(UpperCamelCase__ ) snake_case_ = np.nanmean(UpperCamelCase__ ) snake_case_ = all_acc snake_case_ = iou snake_case_ = acc if nan_to_num is not None: snake_case_ = {metric: np.nan_to_num(UpperCamelCase__ , nan=UpperCamelCase__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def a ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { 'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), 'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ), } ) , reference_urls=[ 'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py' ] , ) def a ( self , snake_case , snake_case , snake_case , snake_case , snake_case = None , snake_case = None , snake_case = False , ): snake_case_ = mean_iou( results=snake_case , gt_seg_maps=snake_case , num_labels=snake_case , ignore_index=snake_case , nan_to_num=snake_case , label_map=snake_case , reduce_labels=snake_case , ) return iou_result
285
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
1
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase ( lowercase_ ): def __init__( self , *snake_case , snake_case=None , snake_case=None , **snake_case ): super().__init__(*snake_case , **snake_case ) snake_case_ = eval_examples snake_case_ = post_process_function def a ( self , snake_case = None , snake_case=None , snake_case = None , snake_case = "eval" , **snake_case , ): snake_case_ = gen_kwargs.copy() snake_case_ = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length ) snake_case_ = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams ) snake_case_ = gen_kwargs snake_case_ = self.eval_dataset if eval_dataset is None else eval_dataset snake_case_ = self.get_eval_dataloader(snake_case ) snake_case_ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. snake_case_ = self.compute_metrics snake_case_ = None snake_case_ = time.time() snake_case_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: snake_case_ = eval_loop( snake_case , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , ) finally: snake_case_ = compute_metrics snake_case_ = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default snake_case_ = self.post_process_function(snake_case , snake_case , snake_case ) snake_case_ = self.compute_metrics(snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): snake_case_ = metrics.pop(snake_case ) metrics.update(output.metrics ) else: snake_case_ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(snake_case ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) snake_case_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case ) return metrics def a ( self , snake_case , snake_case , snake_case=None , snake_case = "test" , **snake_case ): snake_case_ = gen_kwargs.copy() snake_case_ = self.get_test_dataloader(snake_case ) # Temporarily disable metric computation, we will do it in the loop here. snake_case_ = self.compute_metrics snake_case_ = None snake_case_ = time.time() snake_case_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: snake_case_ = eval_loop( snake_case , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , ) finally: snake_case_ = compute_metrics snake_case_ = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output snake_case_ = self.post_process_function(snake_case , snake_case , snake_case , 'predict' ) snake_case_ = self.compute_metrics(snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): snake_case_ = metrics.pop(snake_case ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case )
285
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
1
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCAmelCase : List[str] = 16 _UpperCAmelCase : Union[str, Any] = 32 def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return int(x / 2**20 ) class lowercase : def __enter__( self ): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero snake_case_ = torch.cuda.memory_allocated() return self def __exit__( self , *snake_case ): gc.collect() torch.cuda.empty_cache() snake_case_ = torch.cuda.memory_allocated() snake_case_ = torch.cuda.max_memory_allocated() snake_case_ = bamb(self.end - self.begin ) snake_case_ = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = 16 , UpperCamelCase__ = "bert-base-cased" , UpperCamelCase__ = 320 , UpperCamelCase__ = 160 , ): '''simple docstring''' snake_case_ = AutoTokenizer.from_pretrained(UpperCamelCase__ ) snake_case_ = load_dataset( 'glue' , 'mrpc' , split={'train': F'''train[:{n_train}]''', 'validation': F'''validation[:{n_val}]'''} ) def tokenize_function(UpperCamelCase__ ): # max_length=None => use the model max length (it's actually the default) snake_case_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(UpperCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(UpperCamelCase__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. snake_case_ = DataLoader( tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) snake_case_ = DataLoader( tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ = config['lr'] snake_case_ = int(config['num_epochs'] ) snake_case_ = int(config['seed'] ) snake_case_ = int(config['batch_size'] ) snake_case_ = args.model_name_or_path set_seed(UpperCamelCase__ ) snake_case_ , snake_case_ = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ ) # Instantiate optimizer snake_case_ = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: snake_case_ = 1 snake_case_ = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , ) else: snake_case_ = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # We need to keep track of how many total steps we have iterated over snake_case_ = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ = 0 # Now we train the model snake_case_ = {} for epoch in range(UpperCamelCase__ , UpperCamelCase__ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(UpperCamelCase__ ): snake_case_ = model(**UpperCamelCase__ ) snake_case_ = outputs.loss snake_case_ = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) snake_case_ = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=UpperCamelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCamelCase__ , ) parser.add_argument( '--output_dir' , type=UpperCamelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=UpperCamelCase__ , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=UpperCamelCase__ , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=UpperCamelCase__ , default=1 , help='Number of train epochs.' , ) snake_case_ = parser.parse_args() snake_case_ = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
285
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 'README.md' ) )
285
1
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCamelCase__ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="groupvit-gcc-yfcc" , UpperCamelCase__=False ): '''simple docstring''' snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCamelCase__ ).eval() snake_case_ = torch.load(UpperCamelCase__ , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ , snake_case_ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase__ ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCamelCase__ ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1E-3 ) processor.save_pretrained(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) print('Successfully saved processor and model to' , UpperCamelCase__ ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCamelCase__ , organization='nielsr' ) model.push_to_hub(UpperCamelCase__ , organization='nielsr' ) if __name__ == "__main__": _UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _UpperCAmelCase : str = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
1
from ..utils import DummyObject, requires_backends class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Tuple = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Tuple = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Tuple = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : str = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Dict = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : str = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : int = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Tuple = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Dict = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Any = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Dict = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Any = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : int = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : int = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Any = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : int = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Any = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : int = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[int] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] ) class lowercase ( metaclass=lowercase_ ): __SCREAMING_SNAKE_CASE : List[Any] = ['''sentencepiece'''] def __init__( self , *snake_case , **snake_case ): requires_backends(self , ['sentencepiece'] )
285
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
1
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number_of_steps > 0 ), F'''number_of_steps needs to be positive integer, your input {number_of_steps}''' if number_of_steps == 1: return 1 snake_case_ , snake_case_ = 1, 1 for _ in range(number_of_steps - 1 ): snake_case_ , snake_case_ = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
285
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
1
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _UpperCAmelCase : List[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Optional[int] = """PoolFormerConfig""" # Base docstring _UpperCAmelCase : Dict = """sail/poolformer_s12""" _UpperCAmelCase : str = [1, 512, 7, 7] # Image classification docstring _UpperCAmelCase : Optional[int] = """sail/poolformer_s12""" _UpperCAmelCase : Any = """tabby, tabby cat""" _UpperCAmelCase : str = [ """sail/poolformer_s12""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False ): '''simple docstring''' if drop_prob == 0.0 or not training: return input snake_case_ = 1 - drop_prob snake_case_ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets snake_case_ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize snake_case_ = input.div(UpperCamelCase__ ) * random_tensor return output class lowercase ( nn.Module ): def __init__( self , snake_case = None ): super().__init__() snake_case_ = drop_prob def a ( self , snake_case ): return drop_path(snake_case , self.drop_prob , self.training ) def a ( self ): return "p={}".format(self.drop_prob ) class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ): super().__init__() snake_case_ = patch_size if isinstance(snake_case , collections.abc.Iterable ) else (patch_size, patch_size) snake_case_ = stride if isinstance(snake_case , collections.abc.Iterable ) else (stride, stride) snake_case_ = padding if isinstance(snake_case , collections.abc.Iterable ) else (padding, padding) snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=snake_case ) snake_case_ = norm_layer(snake_case ) if norm_layer else nn.Identity() def a ( self , snake_case ): snake_case_ = self.projection(snake_case ) snake_case_ = self.norm(snake_case ) return embeddings class lowercase ( nn.GroupNorm ): def __init__( self , snake_case , **snake_case ): super().__init__(1 , snake_case , **snake_case ) class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.AvgPoolad(snake_case , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case ) def a ( self , snake_case ): return self.pool(snake_case ) - hidden_states class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , 1 ) snake_case_ = nn.Convad(snake_case , snake_case , 1 ) snake_case_ = PoolFormerDropPath(snake_case ) if isinstance(config.hidden_act , snake_case ): snake_case_ = ACTaFN[config.hidden_act] else: snake_case_ = config.hidden_act def a ( self , snake_case ): snake_case_ = self.conva(snake_case ) snake_case_ = self.act_fn(snake_case ) snake_case_ = self.drop(snake_case ) snake_case_ = self.conva(snake_case ) snake_case_ = self.drop(snake_case ) return hidden_states class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): super().__init__() snake_case_ = PoolFormerPooling(snake_case ) snake_case_ = PoolFormerOutput(snake_case , snake_case , snake_case , snake_case ) snake_case_ = PoolFormerGroupNorm(snake_case ) snake_case_ = PoolFormerGroupNorm(snake_case ) # Useful for training neural nets snake_case_ = PoolFormerDropPath(snake_case ) if drop_path > 0.0 else nn.Identity() snake_case_ = config.use_layer_scale if config.use_layer_scale: snake_case_ = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case) ) , requires_grad=snake_case ) snake_case_ = nn.Parameter( config.layer_scale_init_value * torch.ones((snake_case) ) , requires_grad=snake_case ) def a ( self , snake_case ): if self.use_layer_scale: snake_case_ = self.pooling(self.before_norm(snake_case ) ) snake_case_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection snake_case_ = hidden_states + self.drop_path(snake_case ) snake_case_ = () snake_case_ = self.output(self.after_norm(snake_case ) ) snake_case_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection snake_case_ = hidden_states + self.drop_path(snake_case ) snake_case_ = (output,) + outputs return outputs else: snake_case_ = self.drop_path(self.pooling(self.before_norm(snake_case ) ) ) # First residual connection snake_case_ = pooling_output + hidden_states snake_case_ = () # Second residual connection inside the PoolFormerOutput block snake_case_ = self.drop_path(self.output(self.after_norm(snake_case ) ) ) snake_case_ = hidden_states + layer_output snake_case_ = (output,) + outputs return outputs class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = config # stochastic depth decay rule snake_case_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings snake_case_ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) snake_case_ = nn.ModuleList(snake_case ) # Transformer blocks snake_case_ = [] snake_case_ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers snake_case_ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( snake_case , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(snake_case ) ) snake_case_ = nn.ModuleList(snake_case ) def a ( self , snake_case , snake_case=False , snake_case=True ): snake_case_ = () if output_hidden_states else None snake_case_ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): snake_case_ , snake_case_ = layers # Get patch embeddings from hidden_states snake_case_ = embedding_layer(snake_case ) # Send the embeddings through the blocks for _, blk in enumerate(snake_case ): snake_case_ = blk(snake_case ) snake_case_ = layer_outputs[0] if output_hidden_states: snake_case_ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=snake_case , hidden_states=snake_case ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Tuple = PoolFormerConfig __SCREAMING_SNAKE_CASE : List[Any] = '''poolformer''' __SCREAMING_SNAKE_CASE : Optional[int] = '''pixel_values''' __SCREAMING_SNAKE_CASE : Any = True def a ( self , snake_case ): if isinstance(snake_case , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Optional[Any] = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Dict = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. """ @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = PoolFormerEncoder(snake_case ) # Initialize weights and apply final processing self.post_init() def a ( self ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case , ) snake_case_ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=encoder_outputs.hidden_states , ) class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.Linear(config.hidden_size , config.hidden_size ) def a ( self , snake_case ): snake_case_ = self.dense(snake_case ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = PoolFormerModel(snake_case ) # Final norm snake_case_ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head snake_case_ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.poolformer( snake_case , output_hidden_states=snake_case , return_dict=snake_case , ) snake_case_ = outputs[0] snake_case_ = self.classifier(self.norm(snake_case ).mean([-2, -1] ) ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states )
285
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
1
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase : Optional[Any] = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } _UpperCAmelCase : List[Any] = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } _UpperCAmelCase : Union[str, Any] = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE : str = RoFormerTokenizer def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ): super().__init__( snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , snake_case ) != do_lower_case or pre_tok_state.get('strip_accents' , snake_case ) != strip_accents ): snake_case_ = getattr(snake_case , pre_tok_state.pop('type' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = pre_tok_class(**snake_case ) snake_case_ = do_lower_case def __getstate__( self ): snake_case_ = self.__dict__.copy() snake_case_ = BertPreTokenizer() return state def __setstate__( self , snake_case ): snake_case_ = d snake_case_ = self.__dict__['_tokenizer'].get_vocab() snake_case_ = PreTokenizer.custom(JiebaPreTokenizer(snake_case ) ) def a ( self , snake_case , snake_case=None ): snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a ( self , snake_case , snake_case = None ): snake_case_ = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def a ( self , snake_case , snake_case=None , snake_case=None , snake_case=False , **snake_case , ): snake_case_ = BertPreTokenizer() return super().save_pretrained(snake_case , snake_case , snake_case , snake_case , **snake_case )
285
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
1
from __future__ import annotations from collections import Counter from random import random class lowercase : def __init__( self ): snake_case_ = {} def a ( self , snake_case ): snake_case_ = {} def a ( self , snake_case , snake_case , snake_case ): if nodea not in self.connections: self.add_node(snake_case ) if nodea not in self.connections: self.add_node(snake_case ) snake_case_ = probability def a ( self ): return list(self.connections ) def a ( self , snake_case ): snake_case_ = 0 snake_case_ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = Counter(graph.get_nodes() ) snake_case_ = start for _ in range(UpperCamelCase__ ): snake_case_ = graph.transition(UpperCamelCase__ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
285
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : Dict = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : int = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Optional[Any] = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[Any] = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
285
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
1
import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = torch.load(UpperCamelCase__ , map_location='cpu' ) snake_case_ = chkpt['model'] # We have the base model one level deeper than the original XLM repository snake_case_ = {} for k, v in state_dict.items(): if "pred_layer" in k: snake_case_ = v else: snake_case_ = v snake_case_ = chkpt['params'] snake_case_ = {n: v for n, v in config.items() if not isinstance(UpperCamelCase__ , (torch.FloatTensor, numpy.ndarray) )} snake_case_ = chkpt['dico_word2id'] snake_case_ = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()} # Save pytorch-model snake_case_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME snake_case_ = pytorch_dump_folder_path + '/' + CONFIG_NAME snake_case_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file'] print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase__ , indent=2 ) + '\n' ) print(F'''Save vocab file to {pytorch_config_dump_path}''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase__ , indent=2 ) + '\n' ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _UpperCAmelCase : List[Any] = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
285
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
1
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _UpperCAmelCase : Any = """pt""" elif is_tf_available(): _UpperCAmelCase : Optional[Any] = """tf""" else: _UpperCAmelCase : Union[str, Any] = """jax""" class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = PerceiverTokenizer __SCREAMING_SNAKE_CASE : str = False def a ( self ): super().setUp() snake_case_ = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def a ( self , **snake_case ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , snake_case , snake_case=False , snake_case=20 , snake_case=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. snake_case_ = [] for i in range(len(snake_case ) ): try: snake_case_ = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case ) except UnicodeDecodeError: pass toks.append((i, tok) ) snake_case_ = list(filter(lambda snake_case : re.match(R'^[ a-zA-Z]+$' , t[1] ) , snake_case ) ) snake_case_ = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) ) if max_length is not None and len(snake_case ) > max_length: snake_case_ = toks[:max_length] if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0: while len(snake_case ) < min_length: snake_case_ = toks + toks # toks_str = [t[1] for t in toks] snake_case_ = [t[0] for t in toks] # Ensure consistency snake_case_ = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) if " " not in output_txt and len(snake_case ) > 1: snake_case_ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case ) ) if with_prefix_space: snake_case_ = ' ' + output_txt snake_case_ = tokenizer.encode(snake_case , add_special_tokens=snake_case ) return output_txt, output_ids def a ( self ): snake_case_ = self.perceiver_tokenizer snake_case_ = 'Unicode €.' snake_case_ = tokenizer(snake_case ) snake_case_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] , snake_case ) # decoding snake_case_ = tokenizer.decode(snake_case ) self.assertEqual(snake_case , '[CLS]Unicode €.[SEP]' ) snake_case_ = tokenizer('e è é ê ë' ) snake_case_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] , snake_case ) # decoding snake_case_ = tokenizer.decode(snake_case ) self.assertEqual(snake_case , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def a ( self ): snake_case_ = self.perceiver_tokenizer snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off snake_case_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on snake_case_ = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) if FRAMEWORK != "jax": snake_case_ = list(batch.input_ids.numpy()[0] ) else: snake_case_ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(snake_case , snake_case ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def a ( self ): snake_case_ = self.perceiver_tokenizer snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] snake_case_ = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , snake_case ) self.assertIn('attention_mask' , snake_case ) self.assertNotIn('decoder_input_ids' , snake_case ) self.assertNotIn('decoder_attention_mask' , snake_case ) def a ( self ): snake_case_ = self.perceiver_tokenizer snake_case_ = [ 'Summary of the text.', 'Another summary.', ] snake_case_ = tokenizer( text_target=snake_case , max_length=32 , padding='max_length' , truncation=snake_case , return_tensors=snake_case ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def a ( self ): # safety check on max_len default value so we are sure the test works snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = ' He is very happy, UNwant\u00E9d,running' snake_case_ = tokenizer.encode(snake_case , add_special_tokens=snake_case ) tokenizer.save_pretrained(snake_case ) snake_case_ = tokenizer.__class__.from_pretrained(snake_case ) snake_case_ = after_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) shutil.rmtree(snake_case ) snake_case_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ = tempfile.mkdtemp() snake_case_ = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) snake_case_ = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) snake_case_ = tokenizer.encode(snake_case , add_special_tokens=snake_case ) tokenizer.save_pretrained(snake_case ) snake_case_ = tokenizer.__class__.from_pretrained(snake_case ) snake_case_ = after_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) snake_case_ = tokenizer.__class__.from_pretrained(snake_case , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(snake_case ) def a ( self ): snake_case_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(snake_case ) with open(os.path.join(snake_case , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: snake_case_ = json.load(snake_case ) with open(os.path.join(snake_case , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: snake_case_ = json.load(snake_case ) snake_case_ = [F'''<extra_id_{i}>''' for i in range(125 )] snake_case_ = added_tokens_extra_ids + [ 'an_additional_special_token' ] snake_case_ = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(snake_case , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(snake_case , snake_case ) with open(os.path.join(snake_case , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(snake_case , snake_case ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case_ = tokenizer_class.from_pretrained( snake_case , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=snake_case )] snake_case_ = tokenizer_class.from_pretrained( snake_case , additional_special_tokens=snake_case , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def a ( self ): snake_case_ = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '�' ) def a ( self ): pass def a ( self ): pass def a ( self ): pass def a ( self ): pass def a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens snake_case_ = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] snake_case_ = tokenizer.convert_tokens_to_string(snake_case ) self.assertIsInstance(snake_case , snake_case )
285
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionLatentUpscalePipeline __SCREAMING_SNAKE_CASE : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''height''', '''width''', '''cross_attention_kwargs''', '''negative_prompt_embeds''', '''prompt_embeds''', } __SCREAMING_SNAKE_CASE : Dict = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''} __SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __SCREAMING_SNAKE_CASE : Optional[int] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __SCREAMING_SNAKE_CASE : Any = frozenset([] ) __SCREAMING_SNAKE_CASE : Any = True @property def a ( self ): snake_case_ = 1 snake_case_ = 4 snake_case_ = (16, 16) snake_case_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case ) return image def a ( self ): torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( act_fn='gelu' , attention_head_dim=8 , norm_num_groups=snake_case , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( 'KDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', 'KCrossAttnDownBlock2D', ) , in_channels=8 , mid_block_type=snake_case , only_cross_attention=snake_case , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , ) snake_case_ = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) snake_case_ = EulerDiscreteScheduler(prediction_type='sample' ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , ) snake_case_ = CLIPTextModel(snake_case ) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case_ = { 'unet': model.eval(), 'vae': vae.eval(), 'scheduler': scheduler, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def a ( self , snake_case , snake_case=0 ): if str(snake_case ).startswith('mps' ): snake_case_ = torch.manual_seed(snake_case ) else: snake_case_ = torch.Generator(device=snake_case ).manual_seed(snake_case ) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': self.dummy_image.cpu(), 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def a ( self ): snake_case_ = 'cpu' snake_case_ = self.get_dummy_components() snake_case_ = self.pipeline_class(**snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) snake_case_ = self.get_dummy_inputs(snake_case ) snake_case_ = pipe(**snake_case ).images snake_case_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) snake_case_ = np.array( [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] ) snake_case_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(snake_case , 1e-3 ) def a ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 ) def a ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 ) def a ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a ( self ): super().test_inference_batch_single_identical(expected_max_diff=7e-3 ) def a ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 ) def a ( self ): super().test_save_load_local(expected_max_difference=3e-3 ) def a ( self ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def a ( self ): snake_case_ = [ 'DDIMScheduler', 'DDPMScheduler', 'PNDMScheduler', 'HeunDiscreteScheduler', 'EulerAncestralDiscreteScheduler', 'KDPM2DiscreteScheduler', 'KDPM2AncestralDiscreteScheduler', 'DPMSolverSDEScheduler', ] snake_case_ = self.get_dummy_components() snake_case_ = self.pipeline_class(**snake_case ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) snake_case_ = self.get_dummy_inputs(snake_case ) snake_case_ = 2 snake_case_ = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue snake_case_ = getattr(snake_case , scheduler_enum.name ) snake_case_ = scheduler_cls.from_config(pipe.scheduler.config ) snake_case_ = pipe(**snake_case )[0] outputs.append(snake_case ) assert check_same_shape(snake_case ) @require_torch_gpu @slow class lowercase ( unittest.TestCase ): def a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self ): snake_case_ = torch.manual_seed(33 ) snake_case_ = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa ) pipe.to('cuda' ) snake_case_ = StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa ) upscaler.to('cuda' ) snake_case_ = 'a photo of an astronaut high resolution, unreal engine, ultra realistic' snake_case_ = pipe(snake_case , generator=snake_case , output_type='latent' ).images snake_case_ = upscaler( prompt=snake_case , image=snake_case , num_inference_steps=20 , guidance_scale=0 , generator=snake_case , output_type='np' , ).images[0] snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' ) assert np.abs((expected_image - image).mean() ) < 5e-2 def a ( self ): snake_case_ = torch.manual_seed(33 ) snake_case_ = StableDiffusionLatentUpscalePipeline.from_pretrained( 'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa ) upscaler.to('cuda' ) snake_case_ = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas' snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' ) snake_case_ = upscaler( prompt=snake_case , image=snake_case , num_inference_steps=20 , guidance_scale=0 , generator=snake_case , output_type='np' , ).images[0] snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' ) assert np.abs((expected_image - image).max() ) < 5e-2
285
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
1
UpperCAmelCase__ = 256 # Modulus to hash a string UpperCAmelCase__ = 1000003 def _a ( a :str , a :str ) -> bool: a = len(a ) a = len(a ) if p_len > t_len: return False a = 0 a = 0 a = 1 # Calculating the hash of pattern and substring of text for i in range(a ): a = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus a = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue a = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash a = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _a ( ) -> None: a = '''abc1abc12''' a = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' a = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(a , a ) and not rabin_karp(a , a ) # Test 2) a = '''ABABX''' a = '''ABABZABABYABABX''' assert rabin_karp(a , a ) # Test 3) a = '''AAAB''' a = '''ABAAAAAB''' assert rabin_karp(a , a ) # Test 4) a = '''abcdabcy''' a = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(a , a ) # Test 5) a = '''Lü''' a = '''Lüsai''' assert rabin_karp(a , a ) a = '''Lue''' assert not rabin_karp(a , a ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __A : def __init__(self : Dict , __a : Optional[Any] , __a : List[Any]=3 , __a : List[str]=7 , __a : int=True , __a : int=True , __a : Dict=False , __a : List[str]=True , __a : int=99 , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : Optional[Any]=37 , __a : str="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : str=3 , __a : Tuple=4 , __a : List[str]=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase (self : Union[str, Any] ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , ) def _lowercase (self : Optional[int] , __a : Union[str, Any] , __a : int , __a : Any , __a : int , __a : Union[str, Any] , __a : str , __a : int ): UpperCAmelCase_ = FalconModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : Union[str, Any] , __a : str , __a : Optional[int] , __a : List[Any] , __a : Optional[Any] , __a : str , __a : Any , __a : List[Any] , __a : int , __a : List[Any] , ): UpperCAmelCase_ = True UpperCAmelCase_ = FalconModel(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , ) UpperCAmelCase_ = model(__a , attention_mask=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase (self : Optional[Any] , __a : Dict , __a : Any , __a : str , __a : Dict , __a : List[Any] , __a : str , __a : Tuple , __a : int , __a : List[Any] , ): UpperCAmelCase_ = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase (self : Dict , __a : int , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Any , ): UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() # first forward pass UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , ) UpperCAmelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0] UpperCAmelCase_ = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : str = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) a__ : int = (FalconForCausalLM,) if is_torch_available() else () a__ : Optional[int] = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) a__ : Any = False a__ : Tuple = False def _lowercase (self : int ): UpperCAmelCase_ = FalconModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : List[str] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , *UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: UpperCAmelCase_ = alibi self.model_tester.create_and_check_model(__a , *__a ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = "single_label_classification" UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Any ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = FalconForCausalLM(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , use_cache=__a ) UpperCAmelCase_ = input_ids.shape[0] UpperCAmelCase_ = model._convert_to_rw_cache(result.past_key_values ) UpperCAmelCase_ = model._convert_cache_to_standard_format(__a , __a ) for layer in range(len(__a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = 3 UpperCAmelCase_ = "multi_label_classification" UpperCAmelCase_ = input_dict["input_ids"] UpperCAmelCase_ = input_ids.ne(1 ).to(__a ) UpperCAmelCase_ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase (self : Tuple ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__a , "use_cache" ): return UpperCAmelCase_ = model_class(__a ).to(__a ) if "use_cache" not in inputs: UpperCAmelCase_ = True UpperCAmelCase_ = model(**__a ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return UpperCAmelCase_ = ( getattr(__a , "decoder_layers" , __a ) or getattr(__a , "num_decoder_layers" , __a ) or config.num_hidden_layers ) UpperCAmelCase_ = getattr(__a , "num_kv_heads" , config.num_attention_heads ) UpperCAmelCase_ = getattr(__a , "d_model" , config.hidden_size ) UpperCAmelCase_ = embed_dim // num_attention_heads UpperCAmelCase_ = outputs["past_key_values"] self.assertEqual(len(__a ) , __a ) UpperCAmelCase_ , UpperCAmelCase_ = inputs["input_ids"].shape for i in range(__a ): if config.new_decoder_architecture: UpperCAmelCase_ = config.num_attention_heads elif config.multi_query: UpperCAmelCase_ = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __A ( unittest.TestCase ): @slow def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) UpperCAmelCase_ = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=19 ) UpperCAmelCase_ = tokenizer.batch_decode(__a )[0] self.assertEqual(__a , __a ) @slow def _lowercase (self : Dict ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , num_beams=2 , max_new_tokens=4 ) @slow def _lowercase (self : Union[str, Any] ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: UpperCAmelCase_ = AutoTokenizer.from_pretrained(__a ) UpperCAmelCase_ = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(device=__a ) UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(__a ) # Test results are the same with and without cache UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) UpperCAmelCase_ = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
1
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCAmelCase : Optional[int] = 5_0000 _UpperCAmelCase : Dict = 5000 _UpperCAmelCase , _UpperCAmelCase : Optional[int] = os.path.split(__file__) _UpperCAmelCase : List[str] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES} snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) snake_case_ = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) snake_case_ = generate_example_dataset( os.path.join(UpperCamelCase__ , 'dataset.arrow' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func(UpperCamelCase__ , **UpperCamelCase__ ) print('shuffling dataset' ) snake_case_ = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func( UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , 'wb' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
285
0
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def lowerCAmelCase_ ( ): '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
3
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
'''simple docstring''' class UpperCAmelCase_ : def __init__( self : List[Any] ) -> Any: lowerCAmelCase = {} def __UpperCAmelCase ( self : Any ) -> None: print(self.vertex ) for i in self.vertex: print(UpperCAmelCase__ , ' -> ' , ' -> '.join([str(UpperCAmelCase__ ) for j in self.vertex[i]] ) ) def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(UpperCAmelCase__ ) else: # else make a new vertex lowerCAmelCase = [to_vertex] def __UpperCAmelCase ( self : List[Any] ) -> None: # visited array for storing already visited nodes lowerCAmelCase = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : list ) -> None: # mark start vertex as visited lowerCAmelCase = True print(UpperCAmelCase__ , end=' ' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": __snake_case =Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("""DFS:""") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
4
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
import os from typing import Dict, List, Tuple, TypeVar, Union UpperCAmelCase__ = TypeVar('''T''') UpperCAmelCase__ = Union[List[T], Tuple[T, ...]] UpperCAmelCase__ = Union[T, List[T], Dict[str, T]] UpperCAmelCase__ = Union[str, bytes, os.PathLike]
5
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging A : Dict = logging.get_logger(__name__) class __A( a ): snake_case_ = '''encoder-decoder''' snake_case_ = True def __init__( self , **_snake_case ) -> str: '''simple docstring''' super().__init__(**_snake_case ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" __a = kwargs.pop('''encoder''' ) __a = encoder_config.pop('''model_type''' ) __a = kwargs.pop('''decoder''' ) __a = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig __a = AutoConfig.for_model(_snake_case , **_snake_case ) __a = AutoConfig.for_model(_snake_case , **_snake_case ) __a = True @classmethod def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) __a = True __a = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) __a = self.encoder.to_dict() __a = self.decoder.to_dict() __a = self.__class__.model_type return output
6
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> List[Any]: '''simple docstring''' A__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[int]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: A__ = '' else: A__ = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) A__ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[ : config.hidden_size, : ] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: '''simple docstring''' A__ = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( ) -> Any: '''simple docstring''' A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Dict: '''simple docstring''' A__ = ViTConfig() A__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A__ = True A__ = int(vit_name[-12:-10] ) A__ = int(vit_name[-9:-6] ) else: A__ = 1000 A__ = 'huggingface/label-files' A__ = 'imagenet-1k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = int(vit_name[-6:-4] ) A__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('tiny' ): A__ = 192 A__ = 768 A__ = 12 A__ = 3 elif vit_name[9:].startswith('small' ): A__ = 384 A__ = 1536 A__ = 12 A__ = 6 else: pass else: if vit_name[4:].startswith('small' ): A__ = 768 A__ = 2304 A__ = 8 A__ = 8 elif vit_name[4:].startswith('base' ): pass elif vit_name[4:].startswith('large' ): A__ = 1024 A__ = 4096 A__ = 24 A__ = 16 elif vit_name[4:].startswith('huge' ): A__ = 1280 A__ = 5120 A__ = 32 A__ = 16 # load original model from timm A__ = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ = timm_model.state_dict() if base_model: remove_classification_head_(SCREAMING_SNAKE_CASE__ ) A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # load HuggingFace model if vit_name[-5:] == "in21k": A__ = ViTModel(SCREAMING_SNAKE_CASE__ ).eval() else: A__ = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A__ = DeiTImageProcessor(size=config.image_size ) else: A__ = ViTImageProcessor(size=config.image_size ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = encoding['pixel_values'] A__ = model(SCREAMING_SNAKE_CASE__ ) if base_model: A__ = timm_model.forward_features(SCREAMING_SNAKE_CASE__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.pooler_output , atol=1E-3 ) else: A__ = timm_model(SCREAMING_SNAKE_CASE__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowercase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
7
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger('''transformers.models.speecht5''') lowerCAmelCase_ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } lowerCAmelCase_ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } lowerCAmelCase_ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } lowerCAmelCase_ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } lowerCAmelCase_ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } lowerCAmelCase_ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } lowerCAmelCase_ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } lowerCAmelCase_ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } lowerCAmelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } lowerCAmelCase_ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase_ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } lowerCAmelCase_ = [] lowerCAmelCase_ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] lowerCAmelCase_ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for attribute in key.split('''.''' ): snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if weight_type is not None: snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape else: snake_case_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value elif weight_type == "running_mean": snake_case_ = value elif weight_type == "running_var": snake_case_ = value elif weight_type == "num_batches_tracked": snake_case_ = value else: snake_case_ = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: snake_case_, snake_case_ = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = [] if task == "s2t": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2T snake_case_ = IGNORE_KEYS_S2T elif task == "t2s": snake_case_ = None snake_case_ = MAPPING_T2S snake_case_ = IGNORE_KEYS_T2S elif task == "s2s": snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder snake_case_ = MAPPING_S2S snake_case_ = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): logger.info(F'''{name} was ignored''' ) continue snake_case_ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == '''group''' , ) snake_case_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: snake_case_, snake_case_ = key.split('''.*.''' ) if prefix in name and suffix in name: snake_case_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2] snake_case_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: snake_case_ = '''weight_g''' elif "weight_v" in name: snake_case_ = '''weight_v''' elif "bias" in name: snake_case_ = '''bias''' elif "weight" in name: snake_case_ = '''weight''' elif "running_mean" in name: snake_case_ = '''running_mean''' elif "running_var" in name: snake_case_ = '''running_var''' elif "num_batches_tracked" in name: snake_case_ = '''num_batches_tracked''' else: snake_case_ = None set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = full_name.split('''conv_layers.''' )[-1] snake_case_ = name.split('''.''' ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ): if config_path is not None: snake_case_ = SpeechTaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: snake_case_ = SpeechTaConfig() if task == "s2t": snake_case_ = config.max_text_positions snake_case_ = SpeechTaForSpeechToText(SCREAMING_SNAKE_CASE__ ) elif task == "t2s": snake_case_ = 1876 snake_case_ = 600 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForTextToSpeech(SCREAMING_SNAKE_CASE__ ) elif task == "s2s": snake_case_ = 1876 snake_case_ = config.max_speech_positions snake_case_ = SpeechTaForSpeechToSpeech(SCREAMING_SNAKE_CASE__ ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: snake_case_ = SpeechTaTokenizer(SCREAMING_SNAKE_CASE__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it snake_case_ = AddedToken('''<mask>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) snake_case_ = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) snake_case_ = SpeechTaFeatureExtractor() snake_case_ = SpeechTaProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
8
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp() # fmt: off __SCREAMING_SNAKE_CASE : Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __SCREAMING_SNAKE_CASE : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __SCREAMING_SNAKE_CASE : Optional[int] = {'''unk_token''': '''<unk>'''} __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } __SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , lowerCAmelCase__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :Optional[int] , **lowerCAmelCase__ :Union[str, Any] ) -> List[str]: return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :int , **lowerCAmelCase__ :Union[str, Any] ) -> Any: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :List[Any] , **lowerCAmelCase__ :int ) -> Union[str, Any]: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __magic_name__( self :Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __SCREAMING_SNAKE_CASE : int = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor() __SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ ) def __magic_name__( self :Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 ) __SCREAMING_SNAKE_CASE : int = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase__ ) def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : int = image_processor(lowerCAmelCase__ , return_tensors='''np''' ) __SCREAMING_SNAKE_CASE : Any = processor(images=lowerCAmelCase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __magic_name__( self :Optional[int] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Any = self.get_image_processor() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = '''lower newer''' __SCREAMING_SNAKE_CASE : List[str] = processor(text=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __magic_name__( self :Optional[Any] ) -> List[str]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = '''lower newer''' __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : str = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase__ ): processor() def __magic_name__( self :str ) -> int: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __SCREAMING_SNAKE_CASE : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :Any ) -> Tuple: __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() __SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer''' __SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
9
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 'README.md' ) )
285
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: str =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) lowerCamelCase__: Dict =get_activation("gelu") self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase_) , torch_builtin(UpperCAmelCase_))) self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase_) , gelu_new(UpperCAmelCase_))) def SCREAMING_SNAKE_CASE_ (self : str) ->str: '''simple docstring''' lowerCamelCase__: List[str] =torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) lowerCamelCase__: str =get_activation("gelu") lowerCamelCase__: Union[str, Any] =get_activation("gelu_10") lowerCamelCase__: Dict =torch_builtin(UpperCAmelCase_) lowerCamelCase__: Any =geluaa(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =torch.where(y_gelu_aa < 10.0 , 1 , 0) self.assertTrue(torch.max(UpperCAmelCase_).item() == 10.0) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask)) def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]: '''simple docstring''' get_activation("gelu") get_activation("gelu_10") get_activation("gelu_fast") get_activation("gelu_new") get_activation("gelu_python") get_activation("gelu_pytorch_tanh") get_activation("linear") get_activation("mish") get_activation("quick_gelu") get_activation("relu") get_activation("sigmoid") get_activation("silu") get_activation("swish") get_activation("tanh") with self.assertRaises(UpperCAmelCase_): get_activation("bogus") with self.assertRaises(UpperCAmelCase_): get_activation(UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Dict =get_activation("gelu") lowerCamelCase__: str =1 lowerCamelCase__: Union[str, Any] =get_activation("gelu") self.assertEqual(acta.a , 1) with self.assertRaises(UpperCAmelCase_): lowerCamelCase__: Union[str, Any] =acta.a
10
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "dpt" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=3_8_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=[2, 5, 8, 1_1] , __lowerCamelCase="project" , __lowerCamelCase=[4, 2, 1, 0.5] , __lowerCamelCase=[9_6, 1_9_2, 3_8_4, 7_6_8] , __lowerCamelCase=2_5_6 , __lowerCamelCase=-1 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.4 , __lowerCamelCase=2_5_5 , __lowerCamelCase=0.1 , __lowerCamelCase=[1, 1_0_2_4, 2_4, 2_4] , __lowerCamelCase=[0, 1] , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]: super().__init__(**__lowerCamelCase) _A : Any = hidden_size _A : Dict = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone.") _A : int = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } _A : Dict = BitConfig(**__lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): logger.info("Initializing the config with a `BiT` backbone.") _A : Optional[Any] = BitConfig(**__lowerCamelCase) elif isinstance(__lowerCamelCase , __lowerCamelCase): _A : List[str] = backbone_config else: raise ValueError( F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.") _A : Any = backbone_featmap_shape _A : Optional[Any] = neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.") else: _A : List[Any] = None _A : List[str] = None _A : int = [] _A : Any = num_hidden_layers _A : List[str] = num_attention_heads _A : Any = intermediate_size _A : int = hidden_act _A : int = hidden_dropout_prob _A : Tuple = attention_probs_dropout_prob _A : str = initializer_range _A : Optional[Any] = layer_norm_eps _A : Optional[Any] = image_size _A : Any = patch_size _A : List[Any] = num_channels _A : Union[str, Any] = qkv_bias _A : List[Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") _A : int = readout_type _A : Union[str, Any] = reassemble_factors _A : str = neck_hidden_sizes _A : Optional[Any] = fusion_hidden_size _A : Union[str, Any] = head_in_index _A : List[str] = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _A : str = use_auxiliary_head _A : Union[str, Any] = auxiliary_loss_weight _A : Any = semantic_loss_ignore_index _A : Any = semantic_classifier_dropout def _lowerCamelCase ( self) -> Dict: _A : str = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: _A : Optional[Any] = self.backbone_config.to_dict() _A : Optional[int] = self.__class__.model_type return output
11
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class lowerCamelCase__( __lowerCamelCase): def lowerCAmelCase__ ( self: List[Any] ): __lowerCamelCase = SMALL_MODEL_IDENTIFIER __lowerCamelCase = """pt""" __lowerCamelCase = """tf""" def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ): __lowerCamelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ): __lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ ) model_tf.save_pretrained(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] ): __lowerCamelCase = """mock_framework""" # Framework provided - return whatever the user provides __lowerCamelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase_ ) __lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase_ ) __lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: int ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase_ ) __lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase_ ) __lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ): __lowerCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ): __lowerCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_tf ) # Both in environment -> use PyTorch __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch( """transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ): __lowerCamelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # Both not in environment -> raise error __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) __lowerCamelCase = MagicMock(return_value=UpperCamelCase_ ) with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch( """transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ): with self.assertRaises(UpperCamelCase_ ): __lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
12
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: SCREAMING_SNAKE_CASE_: str = mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: SCREAMING_SNAKE_CASE_: Tuple = max( mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , j - wt[i - 1] ) + val[i - 1] , ) SCREAMING_SNAKE_CASE_: Any = val return f[i][j] def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: SCREAMING_SNAKE_CASE_: Optional[int] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: SCREAMING_SNAKE_CASE_: List[str] = dp[i - 1][w_] return dp[n][w_], dp def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if not (isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) )): raise ValueError( "Both the weights and values vectors must be either lists or tuples" ) SCREAMING_SNAKE_CASE_: int = len(_UpperCAmelCase ) if num_items != len(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = ( "The number of weights must be the same as the number of values.\n" f"But got {num_items} weights and {len(_UpperCAmelCase )} values" ) raise ValueError(_UpperCAmelCase ) for i in range(_UpperCAmelCase ): if not isinstance(wt[i] , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = ( "All weights must be integers but got weight of " f"type {type(wt[i] )} at index {i}" ) raise TypeError(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: set = set() _construct_solution(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return optimal_val, example_optional_set def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_UpperCAmelCase , _UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase ) else: optimal_set.add(_UpperCAmelCase ) _construct_solution(_UpperCAmelCase , _UpperCAmelCase , i - 1 , j - wt[i - 1] , _UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase : Optional[int] = [3, 2, 4, 4] lowerCAmelCase : str = [4, 3, 2, 3] lowerCAmelCase : Any = 4 lowerCAmelCase : List[Any] = 6 lowerCAmelCase : List[str] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowerCAmelCase , lowerCAmelCase : Any = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowerCAmelCase , lowerCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
13
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = [[0 for _ in range(lowercase_ )] for _ in range(m + 1 )] for i in range(m + 1 ): A__ = 1 for n in range(m + 1 ): for k in range(1 , lowercase_ ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: _lowerCamelCase : str = int(input("""Enter a number: """).strip()) print(partition(n)) except ValueError: print("""Please enter a number.""") else: try: _lowerCamelCase : Any = int(sys.argv[1]) print(partition(n)) except ValueError: print("""Please pass a number.""")
14
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[int]: """simple docstring""" if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(a_ , n - 1 , a_ ) * a) % mod else: __A = binary_exponentiation(a_ , n / 2 , a_ ) return (b * b) % mod # a prime number SCREAMING_SNAKE_CASE :Any = 701 SCREAMING_SNAKE_CASE :List[str] = 10_0000_0000 SCREAMING_SNAKE_CASE :List[str] = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
15
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __A ( A_ ,A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) lowerCAmelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) lowercase__ : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) torch.manual_seed(0 ) lowercase__ : List[str] = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0 ) lowercase__ : Optional[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) lowercase__ : List[Any] = CLIPTextModel(_snake_case ) lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ : Dict = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase ( self : Any ,_snake_case : List[Any] ,_snake_case : Any=0 ) -> Any: """simple docstring""" if str(_snake_case ).startswith('''mps''' ): lowercase__ : Optional[Any] = torch.manual_seed(_snake_case ) else: lowercase__ : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowercase__ : List[Any] = 2 lowercase__ : Optional[int] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,) lowercase__ : str = floats_tensor(control_image.shape ,rng=random.Random(_snake_case ) ).to(_snake_case ) lowercase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowercase__ : Optional[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) ) lowercase__ : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def UpperCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : Any ) -> str: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class __A ( A_ ,A_ ,unittest.TestCase ): '''simple docstring''' lowerCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCAmelCase ( self : Tuple ) -> Any: """simple docstring""" torch.manual_seed(0 ) lowercase__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0 ) def init_weights(_snake_case : Optional[int] ): if isinstance(_snake_case ,torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowercase__ : Any = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowercase__ : Dict = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,) torch.manual_seed(0 ) lowercase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0 ) lowercase__ : List[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) lowercase__ : int = CLIPTextModel(_snake_case ) lowercase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ : int = MultiControlNetModel([controlneta, controlneta] ) lowercase__ : Optional[Any] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Union[str, Any]=0 ) -> List[Any]: """simple docstring""" if str(_snake_case ).startswith('''mps''' ): lowercase__ : int = torch.manual_seed(_snake_case ) else: lowercase__ : Dict = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowercase__ : int = 2 lowercase__ : Optional[Any] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_snake_case ,device=torch.device(_snake_case ) ,), ] lowercase__ : Dict = floats_tensor(control_image[0].shape ,rng=random.Random(_snake_case ) ).to(_snake_case ) lowercase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0] lowercase__ : Optional[int] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((64, 64) ) lowercase__ : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" lowercase__ : Dict = self.get_dummy_components() lowercase__ : Dict = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) lowercase__ : Optional[Any] = 10.0 lowercase__ : Tuple = 4 lowercase__ : Dict = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[Any] = steps lowercase__ : Any = scale lowercase__ : Optional[Any] = pipe(**_snake_case )[0] lowercase__ : List[str] = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[int] = steps lowercase__ : int = scale lowercase__ : List[str] = pipe(**_snake_case ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0] lowercase__ : int = self.get_dummy_inputs(_snake_case ) lowercase__ : Optional[int] = steps lowercase__ : Dict = scale lowercase__ : Dict = pipe(**_snake_case ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0] lowercase__ : Dict = self.get_dummy_inputs(_snake_case ) lowercase__ : List[Any] = steps lowercase__ : Optional[int] = scale lowercase__ : List[Any] = pipe(**_snake_case ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def UpperCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def UpperCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" lowercase__ : Union[str, Any] = self.get_dummy_components() lowercase__ : Optional[Any] = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Any ) -> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ : int = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowercase__ : Any = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' ,safety_checker=_snake_case ,controlnet=_snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowercase__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ : List[str] = '''evil space-punk bird''' lowercase__ : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) lowercase__ : Tuple = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) lowercase__ : List[Any] = pipe( _snake_case ,_snake_case ,control_image=_snake_case ,generator=_snake_case ,output_type='''np''' ,num_inference_steps=50 ,strength=0.6 ,) lowercase__ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) lowercase__ : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
16
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : List[Any] = PegasusConfig __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Any = "gelu" def __init__( self : str, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : Optional[int]=7, UpperCAmelCase__ : Tuple=True, UpperCAmelCase__ : Any=False, UpperCAmelCase__ : Dict=9_9, UpperCAmelCase__ : List[str]=3_2, UpperCAmelCase__ : Tuple=2, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Union[str, Any]=3_7, UpperCAmelCase__ : str=0.1, UpperCAmelCase__ : Optional[Any]=0.1, UpperCAmelCase__ : List[str]=4_0, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : List[str]=1, UpperCAmelCase__ : Tuple=0, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = bos_token_id def _lowercase ( self : Tuple ): __lowercase = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) __lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) __lowercase = tf.concat([input_ids, eos_tensor], axis=1 ) __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __lowercase = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) __lowercase = prepare_pegasus_inputs_dict(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) return config, inputs_dict def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple ): __lowercase = TFPegasusModel(config=UpperCAmelCase__ ).get_decoder() __lowercase = inputs_dict["input_ids"] __lowercase = input_ids[:1, :] __lowercase = inputs_dict["attention_mask"][:1, :] __lowercase = inputs_dict["head_mask"] __lowercase = 1 # first forward pass __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, head_mask=UpperCAmelCase__, use_cache=UpperCAmelCase__ ) __lowercase ,__lowercase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase = ids_tensor((self.batch_size, 3), config.vocab_size ) __lowercase = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and __lowercase = tf.concat([input_ids, next_tokens], axis=-1 ) __lowercase = tf.concat([attention_mask, next_attn_mask], axis=-1 ) __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__ )[0] __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, past_key_values=UpperCAmelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice __lowercase = int(ids_tensor((1,), output_from_past.shape[-1] ) ) __lowercase = output_from_no_past[:, -3:, random_slice_idx] __lowercase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase__, UpperCAmelCase__, rtol=1E-3 ) def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[Any], UpperCamelCase_ : Any=None, UpperCamelCase_ : Union[str, Any]=None, UpperCamelCase_ : List[str]=None, UpperCamelCase_ : List[Any]=None, UpperCamelCase_ : Dict=None, ) -> Tuple: '''simple docstring''' if attention_mask is None: __lowercase = tf.cast(tf.math.not_equal(UpperCamelCase_, config.pad_token_id), tf.inta) if decoder_attention_mask is None: __lowercase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta), ], axis=-1, ) if head_mask is None: __lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Dict = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () __UpperCAmelCase : str = (TFPegasusForConditionalGeneration,) if is_tf_available() else () __UpperCAmelCase : Optional[int] = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = False def _lowercase ( self : Tuple ): __lowercase = TFPegasusModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ ) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] __UpperCAmelCase : List[str] = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers __UpperCAmelCase : Dict = "google/pegasus-xsum" @cached_property def _lowercase ( self : int ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowercase ( self : int ): __lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowercase ( self : str, **UpperCAmelCase__ : Tuple ): __lowercase = self.translate_src_text(**UpperCAmelCase__ ) assert self.expected_text == generated_words def _lowercase ( self : Tuple, **UpperCAmelCase__ : Any ): __lowercase = self.tokenizer(self.src_text, **UpperCAmelCase__, padding=UpperCAmelCase__, return_tensors="tf" ) __lowercase = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=UpperCAmelCase__, ) __lowercase = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCAmelCase__ ) return generated_words @slow def _lowercase ( self : Dict ): self._assert_generated_batch_equal_expected()
17
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class a__ : def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Any = 13 SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = 99 SCREAMING_SNAKE_CASE_ : Tuple = 384 SCREAMING_SNAKE_CASE_ : Optional[Any] = 2 SCREAMING_SNAKE_CASE_ : Any = 4 SCREAMING_SNAKE_CASE_ : str = 37 SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu" SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE_ : Dict = 512 SCREAMING_SNAKE_CASE_ : int = 16 SCREAMING_SNAKE_CASE_ : Optional[int] = 2 SCREAMING_SNAKE_CASE_ : Any = 0.02 SCREAMING_SNAKE_CASE_ : str = 3 SCREAMING_SNAKE_CASE_ : int = 4 SCREAMING_SNAKE_CASE_ : Dict = 128 SCREAMING_SNAKE_CASE_ : Any = 2 SCREAMING_SNAKE_CASE_ : Tuple = 9 SCREAMING_SNAKE_CASE_ : List[Any] = 1 SCREAMING_SNAKE_CASE_ : Any = None def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) SCREAMING_SNAKE_CASE_ : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size ) SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : str = None if self.use_labels: SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices ) SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask] SCREAMING_SNAKE_CASE_ : List[str] = model(_A ) SCREAMING_SNAKE_CASE_ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : List[Any] = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE_ : int = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) SCREAMING_SNAKE_CASE_ : Tuple = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : str = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A ) SCREAMING_SNAKE_CASE_ : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : Any = model(_A ) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class a__ ( A__ , A__ , unittest.TestCase ): A = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A = False A = False A = False def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self ) SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Any = True if hasattr(_A,"use_cache" ): SCREAMING_SNAKE_CASE_ : List[Any] = True SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A ) SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A,saved_model=_A ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" ) SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A ) SCREAMING_SNAKE_CASE_ : str = model(_A ) if self.is_encoder_decoder: SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"] SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"] else: SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"] SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"] self.assertEqual(len(_A ),_A ) SCREAMING_SNAKE_CASE_ : Any = getattr( self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ),_A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],) self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],) @slow def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_A ) def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A ) SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A ) def check_decoder_attentions_output(_A : Dict ): SCREAMING_SNAKE_CASE_ : int = len(_A ) self.assertEqual(out_len % 2,0 ) SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],) def check_encoder_attentions_output(_A : Tuple ): SCREAMING_SNAKE_CASE_ : int = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A ) SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) ) SCREAMING_SNAKE_CASE_ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A ) SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(config.output_hidden_states,_A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : int = model_class(_A ) SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Dict = model_class(_A ) SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) ) self.assertEqual(model.config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) @require_tf class a__ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0] SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768] self.assertEqual(output.shape,_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
18
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer __A =logging.get_logger(__name__) __A ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __A ={ '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } __A ={ '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } __A ={ '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } __A ={ '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } __A ={ '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } __A ={ '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } __A ={ '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } __A ={ '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } __A ={ '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class _SCREAMING_SNAKE_CASE ( snake_case_ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = DPRContextEncoderTokenizer class _SCREAMING_SNAKE_CASE ( snake_case_ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = DPRQuestionEncoderTokenizer __A =collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) __A =collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) __A =R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(snake_case_ ) class _SCREAMING_SNAKE_CASE : def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , ) elif titles is None or texts is None: lowerCamelCase_ = titles if texts is None else texts return super().__call__( lowercase , lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , ) lowerCamelCase_ = titles if not isinstance(lowercase , lowercase ) else [titles] lowerCamelCase_ = texts if not isinstance(lowercase , lowercase ) else [texts] lowerCamelCase_ = len(lowercase ) lowerCamelCase_ = questions if not isinstance(lowercase , lowercase ) else [questions] * n_passages assert len(lowercase ) == len( lowercase ), f'There should be as many titles than texts but got {len(lowercase )} titles and {len(lowercase )} texts.' lowerCamelCase_ = super().__call__(lowercase , lowercase , padding=lowercase , truncation=lowercase )["input_ids"] lowerCamelCase_ = super().__call__(lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase )["input_ids"] lowerCamelCase_ = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowercase , lowercase ) ] } if return_attention_mask is not False: lowerCamelCase_ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ = attention_mask return self.pad(lowercase , padding=lowercase , max_length=lowercase , return_tensors=lowercase ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ) -> List[DPRSpanPrediction]: lowerCamelCase_ = reader_input["input_ids"] lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reader_output[:3] lowerCamelCase_ = len(lowercase ) lowerCamelCase_ = sorted(range(lowercase ) , reverse=lowercase , key=relevance_logits.__getitem__ ) lowerCamelCase_ = [] for doc_id in sorted_docs: lowerCamelCase_ = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ = sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ = len(lowercase ) lowerCamelCase_ = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase , top_spans=lowercase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase , start_index=lowercase , end_index=lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowercase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[DPRSpanPrediction]: lowerCamelCase_ = [] for start_index, start_score in enumerate(lowercase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ = sorted(lowercase , key=lambda lowercase : x[1] , reverse=lowercase ) lowerCamelCase_ = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' lowerCamelCase_ = end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowercase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case_ ) class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = DPRReaderTokenizer
19
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase : Optional[Any] = 250004 lowercase : Optional[int] = 250020 @require_sentencepiece @require_tokenizers class __snake_case ( lowerCAmelCase , unittest.TestCase ): _a : Optional[int]= MBartTokenizer _a : str= MBartTokenizerFast _a : Dict= True _a : int= True def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase : str = MBartTokenizer(snake_case ,keep_accents=snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = MBartTokenizer(snake_case ,keep_accents=snake_case ) lowercase : Any = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) lowercase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) lowercase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual( snake_case ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] ,) lowercase : str = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowercase : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(snake_case ,**snake_case ) lowercase : Optional[Any] = self.tokenizer_class.from_pretrained(snake_case ,**snake_case ) lowercase : Optional[int] = tempfile.mkdtemp() lowercase : Dict = tokenizer_r.save_pretrained(snake_case ) lowercase : Tuple = tokenizer_p.save_pretrained(snake_case ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) lowercase : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(snake_case ,snake_case ) # Checks everything loads correctly in the same way lowercase : Any = tokenizer_r.from_pretrained(snake_case ) lowercase : str = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case ,snake_case ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case ) # Save tokenizer rust, legacy_format=True lowercase : int = tempfile.mkdtemp() lowercase : Dict = tokenizer_r.save_pretrained(snake_case ,legacy_format=snake_case ) lowercase : str = tokenizer_p.save_pretrained(snake_case ) # Checks it save with the same files self.assertSequenceEqual(snake_case ,snake_case ) # Checks everything loads correctly in the same way lowercase : Optional[int] = tokenizer_r.from_pretrained(snake_case ) lowercase : Tuple = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case ,snake_case ) ) shutil.rmtree(snake_case ) # Save tokenizer rust, legacy_format=False lowercase : List[Any] = tempfile.mkdtemp() lowercase : List[Any] = tokenizer_r.save_pretrained(snake_case ,legacy_format=snake_case ) lowercase : Union[str, Any] = tokenizer_p.save_pretrained(snake_case ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowercase : Tuple = tokenizer_r.from_pretrained(snake_case ) lowercase : Optional[int] = tokenizer_p.from_pretrained(snake_case ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case ,snake_case ) ) shutil.rmtree(snake_case ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): _a : Any= "facebook/mbart-large-en-ro" _a : Union[str, Any]= [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] _a : str= [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] _a : Optional[Any]= [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def _SCREAMING_SNAKE_CASE ( cls ): '''simple docstring''' lowercase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ) lowercase : Optional[int] = 1 return cls def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,250020 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertIn(snake_case ,self.tokenizer.all_special_ids ) lowercase : int = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowercase : Dict = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case ) lowercase : str = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case ) self.assertEqual(snake_case ,snake_case ) self.assertNotIn(self.tokenizer.eos_token ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] ,snake_case ) lowercase : Tuple = 10 lowercase : Dict = self.tokenizer(snake_case ,max_length=snake_case ,truncation=snake_case ).input_ids[0] self.assertEqual(ids[-2] ,2 ) self.assertEqual(ids[-1] ,snake_case ) self.assertEqual(len(snake_case ) ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[250026, 250001] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = tempfile.mkdtemp() lowercase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case ) lowercase : List[str] = MBartTokenizer.from_pretrained(snake_case ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case ) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case ,return_tensors="""pt""" ) lowercase : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) lowercase : Any = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case ,snake_case ) self.assertEqual((2, 14) ,batch.input_ids.shape ) self.assertEqual((2, 14) ,batch.attention_mask.shape ) lowercase : int = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,snake_case ) self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = self.tokenizer(self.src_text ,padding=snake_case ,truncation=snake_case ,max_length=3 ,return_tensors="""pt""" ) lowercase : Optional[int] = self.tokenizer( text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=10 ,return_tensors="""pt""" ) lowercase : Dict = targets["""input_ids"""] lowercase : int = shift_tokens_right(snake_case ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(snake_case ) ,{ # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } ,)
20
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE : Optional[int] = 16 SCREAMING_SNAKE_CASE : List[Any] = 32 def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ = 16 ) -> Optional[Any]: _lowercase : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) _lowercase : Any = load_dataset('glue' , 'mrpc' ) def tokenize_function(lowerCamelCase_ ): # max_length=None => use the model max length (it's actually the default) _lowercase : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowercase : Union[str, Any] = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowercase : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowerCamelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowercase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowercase : Union[str, Any] = 16 elif accelerator.mixed_precision != "no": _lowercase : Any = 8 else: _lowercase : List[Any] = None return tokenizer.pad( lowerCamelCase_ , padding='longest' , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors='pt' , ) # Instantiate dataloaders. _lowercase : str = DataLoader( tokenized_datasets['train'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=lowerCamelCase_ ) _lowercase : str = DataLoader( tokenized_datasets['validation'] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=(accelerator.mixed_precision == 'fp8') , ) return train_dataloader, eval_dataloader def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: # Initialize accelerator _lowercase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowercase : Tuple = config['lr'] _lowercase : Any = int(config['num_epochs'] ) _lowercase : Optional[int] = int(config['seed'] ) _lowercase : List[str] = int(config['batch_size'] ) _lowercase : List[Any] = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation _lowercase : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowercase : List[str] = batch_size // MAX_GPU_BATCH_SIZE _lowercase : Any = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase_ ) _lowercase , _lowercase : Optional[int] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowercase : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCamelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowercase : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer _lowercase : str = AdamW(params=model.parameters() , lr=lowerCamelCase_ ) # Instantiate scheduler _lowercase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : int = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Now we train the model for epoch in range(lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _lowercase : str = model(**lowerCamelCase_ ) _lowercase : Tuple = outputs.loss _lowercase : str = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowercase : List[str] = model(**lowerCamelCase_ ) _lowercase : Dict = outputs.logits.argmax(dim=-1 ) _lowercase , _lowercase : str = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) _lowercase : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCamelCase_ ) def UpperCamelCase_( ) -> Tuple: _lowercase : List[Any] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _lowercase : Optional[int] = parser.parse_args() _lowercase : Dict = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
21
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
'''simple docstring''' import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) class A_ ( lowerCAmelCase_ ): _lowerCamelCase : Optional[Any] = CLIPConfig _lowerCamelCase : Optional[Any] = ["""CLIPEncoderLayer"""] def __init__( self : Dict , snake_case_ : CLIPConfig ): super().__init__(snake_case_ ) _UpperCAmelCase = CLIPVisionModelWithProjection(config.vision_config ) _UpperCAmelCase = nn.Linear(config.vision_config.projection_dim , 1 ) _UpperCAmelCase = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowercase ( self : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int]=0.5 , snake_case_ : List[str]=0.5 ): _UpperCAmelCase = self.vision_model(snake_case_ )[0] _UpperCAmelCase = self.p_head(snake_case_ ) _UpperCAmelCase = nsfw_detected.flatten() _UpperCAmelCase = nsfw_detected > p_threshold _UpperCAmelCase = nsfw_detected.tolist() if any(snake_case_ ): logger.warning( "Potential NSFW content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, nsfw_detected_ in enumerate(snake_case_ ): if nsfw_detected_: _UpperCAmelCase = np.zeros(images[idx].shape ) _UpperCAmelCase = self.w_head(snake_case_ ) _UpperCAmelCase = watermark_detected.flatten() _UpperCAmelCase = watermark_detected > w_threshold _UpperCAmelCase = watermark_detected.tolist() if any(snake_case_ ): logger.warning( "Potential watermarked content was detected in one or more images. A black image will be returned instead." " Try again with a different prompt and/or seed." ) for idx, watermark_detected_ in enumerate(snake_case_ ): if watermark_detected_: _UpperCAmelCase = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
22
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : str ) -> Union[str, Any]: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=True ) -> Any: model.train() UpperCAmelCase : List[Any] = model(_lowerCAmelCase ) UpperCAmelCase : Optional[int] = F.mse_loss(_lowerCAmelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str]=False ) -> str: set_seed(42 ) UpperCAmelCase : Union[str, Any] = RegressionModel() UpperCAmelCase : Dict = deepcopy(_lowerCAmelCase ) UpperCAmelCase : Any = RegressionDataset(length=80 ) UpperCAmelCase : Tuple = DataLoader(_lowerCAmelCase , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase : Optional[int] = AdamW(params=model.parameters() , lr=1e-3 ) UpperCAmelCase : str = AdamW(params=ddp_model.parameters() , lr=1e-3 ) UpperCAmelCase : Union[str, Any] = LambdaLR(_lowerCAmelCase , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 ) UpperCAmelCase : int = LambdaLR(_lowerCAmelCase , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 ) # Make a copy of `model` if sched: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: UpperCAmelCase , UpperCAmelCase : int = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def snake_case_ ( _lowerCAmelCase : Any ) -> Optional[Any]: # Test when on a single CPU or GPU that the context manager does nothing UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_training_setup(_lowerCAmelCase ) # Use a single batch UpperCAmelCase , UpperCAmelCase : Optional[Any] = next(iter(_lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: # Sync grads step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : str = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] def snake_case_ ( _lowerCAmelCase : Tuple ) -> int: # Test on distributed setup that context manager behaves properly UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_training_setup(_lowerCAmelCase ) # Use a single batch UpperCAmelCase , UpperCAmelCase : List[Any] = next(iter(_lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : List[Any] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: # Sync grads step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : Optional[Any] = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] def snake_case_ ( _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[int]=False ) -> Dict: UpperCAmelCase : Union[str, Any] = Accelerator( split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = get_training_setup(_lowerCAmelCase ) for iteration, batch in enumerate(_lowerCAmelCase ): UpperCAmelCase , UpperCAmelCase : Tuple = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCAmelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase : List[Any] = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] GradientState._reset_state() def snake_case_ ( _lowerCAmelCase : List[str]=False , _lowerCAmelCase : str=False ) -> Optional[int]: UpperCAmelCase : Dict = Accelerator( split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_training_setup(_lowerCAmelCase , _lowerCAmelCase ) for iteration, batch in enumerate(_lowerCAmelCase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase , UpperCAmelCase : str = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def snake_case_ ( ) -> Tuple: UpperCAmelCase : Optional[Any] = Accelerator() UpperCAmelCase : List[str] = RegressionDataset(length=80 ) UpperCAmelCase : str = DataLoader(_lowerCAmelCase , batch_size=16 ) UpperCAmelCase : List[Any] = RegressionDataset(length=96 ) UpperCAmelCase : List[Any] = DataLoader(_lowerCAmelCase , batch_size=16 ) UpperCAmelCase , UpperCAmelCase : Dict = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase ) if iteration < len(_lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase ) if batch_num < len(_lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def snake_case_ ( ) -> Optional[int]: UpperCAmelCase : int = Accelerator() UpperCAmelCase : Any = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(_lowerCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(_lowerCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(_lowerCAmelCase , _lowerCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCAmelCase , _lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
23
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCAmelCase : Optional[int] = 5_0000 _UpperCAmelCase : Dict = 5000 _UpperCAmelCase , _UpperCAmelCase : Optional[int] = os.path.split(__file__) _UpperCAmelCase : List[str] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES} snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) snake_case_ = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) snake_case_ = generate_example_dataset( os.path.join(UpperCamelCase__ , 'dataset.arrow' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func(UpperCamelCase__ , **UpperCamelCase__ ) print('shuffling dataset' ) snake_case_ = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func( UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , 'wb' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
285
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Union[str, Any] = GPTaTokenizer A_ : List[Any] = GPTaTokenizerFast A_ : Dict = True A_ : List[str] = {'add_prefix_space': True} A_ : List[str] = False def a (self : Any ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] __snake_case = dict(zip(a__ , range(len(a__ ) ) ) ) __snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __snake_case = {'''unk_token''': '''<unk>'''} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a__ ) ) def a (self : List[str] , **a__ : List[Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a (self : Dict , **a__ : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ ) def a (self : int , a__ : Optional[int] ): """simple docstring""" __snake_case = '''lower newer''' __snake_case = '''lower newer''' return input_text, output_text def a (self : Optional[int] ): """simple docstring""" __snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case = '''lower newer''' __snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ ) self.assertListEqual(a__ , a__ ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def a (self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __snake_case = self.get_tokenizer() __snake_case = self.get_rust_tokenizer(add_prefix_space=a__ ) __snake_case = '''lower newer''' # Testing tokenization __snake_case = tokenizer.tokenize(a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids without special tokens __snake_case = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) # Testing conversion to ids with special tokens __snake_case = self.get_rust_tokenizer(add_prefix_space=a__ ) __snake_case = tokenizer.encode(a__ , add_prefix_space=a__ ) __snake_case = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) # Testing the unknown token __snake_case = tokens + [rust_tokenizer.unk_token] __snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def a (self : Tuple , *a__ : Optional[int] , **a__ : Optional[Any] ): """simple docstring""" pass def a (self : List[str] , a__ : Any=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) # Simple input __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case = ('''This is a simple input''', '''This is a pair''') __snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' ) # Simple input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' ) # Simple input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , ) # Pair input self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''' ) # Pair input self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''' ) # Pair input self.assertRaises( a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , ) def a (self : List[str] ): """simple docstring""" __snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] __snake_case = ('''This is a simple input''', '''This is a pair''') __snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] __snake_case = tokenizer.pad_token_id __snake_case = tokenizer(a__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) __snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' ) __snake_case = tokenizer(*a__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) __snake_case = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def a (self : Optional[Any] ): """simple docstring""" __snake_case = '''$$$''' __snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ ) __snake_case = '''This is a simple input''' __snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] __snake_case = tokenizer.bos_token_id __snake_case = tokenizer(a__ ) __snake_case = tokenizer(a__ ) self.assertEqual(out_s.input_ids[0] , a__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __snake_case = tokenizer.decode(out_s.input_ids ) __snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , a__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def a (self : List[Any] ): """simple docstring""" pass def a (self : Dict ): """simple docstring""" __snake_case = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __snake_case = '''Encode this.''' __snake_case = '''This one too please.''' __snake_case = tokenizer.encode(a__ , add_special_tokens=a__ ) encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ ) __snake_case = tokenizer.encode_plus( a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , ) __snake_case = encoded_sequence_dict['''input_ids'''] __snake_case = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(a__ ) , len(a__ ) ) __snake_case = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ ) ] __snake_case = [x for x in filtered_sequence if x is not None] self.assertEqual(a__ , a__ ) @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[int] ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ ) __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('''test_opt''' ) __snake_case = AutoTokenizer.from_pretrained('''./test_opt''' ) __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) def a (self : List[str] ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=a__ ) __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) # Same as above self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def a (self : Tuple ): """simple docstring""" __snake_case = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=a__ ) __snake_case = '''bos''' __snake_case = tokenizer.get_vocab()['''bos'''] __snake_case = '''A photo of a cat''' __snake_case = tokenizer.encode( a__ , ) # We changed the bos token self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained('''./tok''' ) __snake_case = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) __snake_case = tokenizer.encode( a__ , ) self.assertEqual(a__ , [3_1957, 250, 1345, 9, 10, 4758] )
24
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = data SCREAMING_SNAKE_CASE__ : List[Any] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def __magic_name__ (self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64) SCREAMING_SNAKE_CASE__ : Dict = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def __magic_name__ (self ) -> int: """simple docstring""" return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = list(struct.unpack(""">16L""" , SCREAMING_SNAKE_CASE__ ) ) + [0] * 64 for i in range(16 , 80 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __magic_name__ (self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.padding() SCREAMING_SNAKE_CASE__ : Any = self.split_blocks() for block in self.blocks: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.expand_block(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.h for i in range(0 , 80 ): if 0 <= i < 20: SCREAMING_SNAKE_CASE__ : List[str] = (b & c) | ((~b) & d) SCREAMING_SNAKE_CASE__ : Dict = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: SCREAMING_SNAKE_CASE__ : Optional[Any] = b ^ c ^ d SCREAMING_SNAKE_CASE__ : str = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: SCREAMING_SNAKE_CASE__ : Any = (b & c) | (b & d) | (c & d) SCREAMING_SNAKE_CASE__ : List[Any] = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: SCREAMING_SNAKE_CASE__ : Optional[int] = b ^ c ^ d SCREAMING_SNAKE_CASE__ : Optional[Any] = 0Xc_a_6_2_c_1_d_6 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = ( self.rotate(SCREAMING_SNAKE_CASE__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(SCREAMING_SNAKE_CASE__ , 30 ), c, d, ) SCREAMING_SNAKE_CASE__ : List[Any] = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Any = b"""Test String""" assert SHAaHash(_snake_case ).final_hash() == hashlib.shaa(_snake_case ).hexdigest() # noqa: S324 def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,) parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[int] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file ,"""rb""" ) as f: SCREAMING_SNAKE_CASE__ : List[str] = f.read() else: SCREAMING_SNAKE_CASE__ : Dict = bytes(_snake_case ,"""utf-8""" ) print(SHAaHash(_snake_case ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
25
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
import torch def lowerCAmelCase_ ( ): if torch.cuda.is_available(): _A : Dict = torch.cuda.device_count() else: _A : Union[str, Any] = 0 print(f'''Successfully ran on {num_gpus} GPUs''' ) if __name__ == "__main__": main()
26
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __lowercase : Tuple = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: '))) print('Googling.....') __lowercase : Any = f'''https://www.google.com/search?q={query}&num=100''' __lowercase : Optional[int] = requests.get( url, headers={'User-Agent': str(UserAgent().random)}, ) try: __lowercase : int = ( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'yuRUbf'}) .find('a') .get('href') ) except AttributeError: __lowercase : Optional[Any] = parse_qs( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'kCrYT'}) .find('a') .get('href') )['url'][0] webbrowser.open(link)
27
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[Any] = OrderedDict( [ ("align", "EfficientNetImageProcessor"), ("beit", "BeitImageProcessor"), ("bit", "BitImageProcessor"), ("blip", "BlipImageProcessor"), ("blip-2", "BlipImageProcessor"), ("bridgetower", "BridgeTowerImageProcessor"), ("chinese_clip", "ChineseCLIPImageProcessor"), ("clip", "CLIPImageProcessor"), ("clipseg", "ViTImageProcessor"), ("conditional_detr", "ConditionalDetrImageProcessor"), ("convnext", "ConvNextImageProcessor"), ("convnextv2", "ConvNextImageProcessor"), ("cvt", "ConvNextImageProcessor"), ("data2vec-vision", "BeitImageProcessor"), ("deformable_detr", "DeformableDetrImageProcessor"), ("deit", "DeiTImageProcessor"), ("deta", "DetaImageProcessor"), ("detr", "DetrImageProcessor"), ("dinat", "ViTImageProcessor"), ("donut-swin", "DonutImageProcessor"), ("dpt", "DPTImageProcessor"), ("efficientformer", "EfficientFormerImageProcessor"), ("efficientnet", "EfficientNetImageProcessor"), ("flava", "FlavaImageProcessor"), ("focalnet", "BitImageProcessor"), ("git", "CLIPImageProcessor"), ("glpn", "GLPNImageProcessor"), ("groupvit", "CLIPImageProcessor"), ("imagegpt", "ImageGPTImageProcessor"), ("instructblip", "BlipImageProcessor"), ("layoutlmv2", "LayoutLMv2ImageProcessor"), ("layoutlmv3", "LayoutLMv3ImageProcessor"), ("levit", "LevitImageProcessor"), ("mask2former", "Mask2FormerImageProcessor"), ("maskformer", "MaskFormerImageProcessor"), ("mgp-str", "ViTImageProcessor"), ("mobilenet_v1", "MobileNetV1ImageProcessor"), ("mobilenet_v2", "MobileNetV2ImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevitv2", "MobileViTImageProcessor"), ("nat", "ViTImageProcessor"), ("oneformer", "OneFormerImageProcessor"), ("owlvit", "OwlViTImageProcessor"), ("perceiver", "PerceiverImageProcessor"), ("pix2struct", "Pix2StructImageProcessor"), ("poolformer", "PoolFormerImageProcessor"), ("regnet", "ConvNextImageProcessor"), ("resnet", "ConvNextImageProcessor"), ("sam", "SamImageProcessor"), ("segformer", "SegformerImageProcessor"), ("swiftformer", "ViTImageProcessor"), ("swin", "ViTImageProcessor"), ("swin2sr", "Swin2SRImageProcessor"), ("swinv2", "ViTImageProcessor"), ("table-transformer", "DetrImageProcessor"), ("timesformer", "VideoMAEImageProcessor"), ("tvlt", "TvltImageProcessor"), ("upernet", "SegformerImageProcessor"), ("van", "ConvNextImageProcessor"), ("videomae", "VideoMAEImageProcessor"), ("vilt", "ViltImageProcessor"), ("vit", "ViTImageProcessor"), ("vit_hybrid", "ViTHybridImageProcessor"), ("vit_mae", "ViTImageProcessor"), ("vit_msn", "ViTImageProcessor"), ("xclip", "CLIPImageProcessor"), ("yolos", "YolosImageProcessor"), ] ) _lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __lowerCamelCase ( A__ ) -> Optional[int]: """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: UpperCamelCase = model_type_to_module_name(A__ ) UpperCamelCase = importlib.import_module(F""".{module_name}""" , 'transformers.models' ) try: return getattr(A__ , A__ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(A__ , '__name__' , A__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. UpperCamelCase = importlib.import_module('transformers' ) if hasattr(A__ , A__ ): return getattr(A__ , A__ ) return None def __lowerCamelCase ( A__ , A__ = None , A__ = False , A__ = False , A__ = None , A__ = None , A__ = None , A__ = False , **A__ , ) -> int: """simple docstring""" UpperCamelCase = get_file_from_repo( A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(A__ , encoding='utf-8' ) as reader: return json.load(A__ ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : str ): """simple docstring""" raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(UpperCamelCase__ ) def A ( cls : Optional[int] , UpperCamelCase__ : str , **UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = kwargs.pop('config' , UpperCamelCase__ ) UpperCamelCase = kwargs.pop('trust_remote_code' , UpperCamelCase__ ) UpperCamelCase = True UpperCamelCase , UpperCamelCase = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase__ , **UpperCamelCase__ ) UpperCamelCase = config_dict.get('image_processor_type' , UpperCamelCase__ ) UpperCamelCase = None if "AutoImageProcessor" in config_dict.get('auto_map' , {} ): UpperCamelCase = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: UpperCamelCase = config_dict.pop('feature_extractor_type' , UpperCamelCase__ ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' ) UpperCamelCase = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): UpperCamelCase = config_dict['auto_map']['AutoFeatureExtractor'] UpperCamelCase = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) # It could be in `config.image_processor_type`` UpperCamelCase = getattr(UpperCamelCase__ , 'image_processor_type' , UpperCamelCase__ ) if hasattr(UpperCamelCase__ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map: UpperCamelCase = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: UpperCamelCase = image_processor_class_from_name(UpperCamelCase__ ) UpperCamelCase = image_processor_auto_map is not None UpperCamelCase = image_processor_class is not None or type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING UpperCamelCase = resolve_trust_remote_code( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if has_remote_code and trust_remote_code: UpperCamelCase = get_class_from_dynamic_module( UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) UpperCamelCase = kwargs.pop('code_revision' , UpperCamelCase__ ) if os.path.isdir(UpperCamelCase__ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) elif image_processor_class is not None: return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(UpperCamelCase__ ) in IMAGE_PROCESSOR_MAPPING: UpperCamelCase = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase__ )] return image_processor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) raise ValueError( f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """ f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def A ( UpperCamelCase__ : Dict , UpperCamelCase__ : int ): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__ )
28
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : Optional[int] = '''efficientformer''' def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None: super().__init__(**_UpperCamelCase ) UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = hidden_sizes UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[Any] = depths UpperCAmelCase_ : List[Any] = mlp_expansion_ratio UpperCAmelCase_ : List[str] = downsamples UpperCAmelCase_ : List[Any] = dim UpperCAmelCase_ : Tuple = key_dim UpperCAmelCase_ : Optional[int] = attention_ratio UpperCAmelCase_ : str = resolution UpperCAmelCase_ : Dict = pool_size UpperCAmelCase_ : Union[str, Any] = downsample_patch_size UpperCAmelCase_ : List[str] = downsample_stride UpperCAmelCase_ : List[str] = downsample_pad UpperCAmelCase_ : Any = drop_path_rate UpperCAmelCase_ : Dict = num_metaad_blocks UpperCAmelCase_ : Dict = distillation UpperCAmelCase_ : int = use_layer_scale UpperCAmelCase_ : Any = layer_scale_init_value UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Dict = batch_norm_eps
29
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class lowercase__( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: lowercase_ = jnp.ones((batch_size, length) ) / length return scores def _lowercase ( self : Tuple ) -> Optional[int]: lowercase_ = None lowercase_ = 2_0 lowercase_ = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE_ ) # tweak scores to not be uniform anymore lowercase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowercase_ = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowercase_ = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ) lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowercase_ = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowercase_ = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 ) lowercase_ = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def _lowercase ( self : str ) -> Union[str, Any]: lowercase_ = None lowercase_ = 1_0 lowercase_ = 2 # create ramp distribution lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy() lowercase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size lowercase_ = FlaxTopKLogitsWarper(3 ) lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowercase_ = 5 lowercase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, length) ).copy() lowercase_ = top_k_warp_safety_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def _lowercase ( self : Dict ) -> List[str]: lowercase_ = None lowercase_ = 1_0 lowercase_ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowercase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowercase_ = FlaxTopPLogitsWarper(0.8 ) lowercase_ = np.exp(top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowercase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) ) # check edge cases with negative and extreme logits lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowercase_ = ramp_logits[1] * 1_00.0 # make sure at least 2 tokens are kept lowercase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def _lowercase ( self : Any ) -> List[str]: lowercase_ = 2_0 lowercase_ = 4 lowercase_ = 0 lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ ) # check that min length is applied at length 5 lowercase_ = ids_tensor((batch_size, 2_0) , vocab_size=2_0 ) lowercase_ = 5 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = 1_5 lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() ) def _lowercase ( self : Optional[Any] ) -> int: lowercase_ = 2_0 lowercase_ = 4 lowercase_ = 0 lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ ) # check that all scores are -inf except the bos_token_id score lowercase_ = ids_tensor((batch_size, 1) , vocab_size=2_0 ) lowercase_ = 1 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowercase_ = 3 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ = 2_0 lowercase_ = 4 lowercase_ = 0 lowercase_ = 5 lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) # check that all scores are -inf except the eos_token_id when max_length is reached lowercase_ = ids_tensor((batch_size, 4) , vocab_size=2_0 ) lowercase_ = 4 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowercase_ = 3 lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() ) def _lowercase ( self : List[Any] ) -> List[str]: lowercase_ = 4 lowercase_ = 1_0 lowercase_ = 1_5 lowercase_ = 2 lowercase_ = 1 lowercase_ = 1_5 # dummy input_ids and scores lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ ) lowercase_ = input_ids.copy() lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = scores.copy() # instantiate all dist processors lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowercase_ = FlaxTopKLogitsWarper(3 ) lowercase_ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = 1_0 # no processor list lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) # with processor list lowercase_ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) # scores should be equal self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def _lowercase ( self : List[str] ) -> int: lowercase_ = 4 lowercase_ = 1_0 lowercase_ = 1_5 lowercase_ = 2 lowercase_ = 1 lowercase_ = 1_5 # dummy input_ids and scores lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ ) lowercase_ = input_ids.copy() lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = scores.copy() # instantiate all dist processors lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowercase_ = FlaxTopKLogitsWarper(3 ) lowercase_ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) lowercase_ = 1_0 # no processor list def run_no_processor_list(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ): lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) return scores # with processor list def run_processor_list(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ): lowercase_ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) return scores lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ ) lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ ) lowercase_ = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = jitted_run_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # scores should be equal self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
30
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
'''simple docstring''' def UpperCamelCase_ ( ) -> str: """simple docstring""" _UpperCAmelCase : Dict = 0 for i in range(1 , 1_001 ): total += i**i return str(_UpperCAmelCase )[-10:] if __name__ == "__main__": print(solution())
31
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 'README.md' ) )
285
0
import enum import shutil import sys UpperCAmelCase_ , UpperCAmelCase_ : List[str] = shutil.get_terminal_size() UpperCAmelCase_ : List[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'} class SCREAMING_SNAKE_CASE__ ( enum.Enum ): snake_case__ : Optional[Any] = 0 snake_case__ : Dict = 1 def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[Any]="" ) -> List[str]: """simple docstring""" sys.stdout.write(str(__A ) + end ) sys.stdout.flush() def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : int , __A : List[Any]="" ) -> int: """simple docstring""" forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , __A ) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" forceWrite('\r' ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : str ) -> Optional[int]: """simple docstring""" forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" forceWrite(' ' * TERMINAL_WIDTH ) reset_cursor() def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" reset_cursor() forceWrite('-' * TERMINAL_WIDTH )
32
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Union[str, Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
33
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __a = logging.get_logger(__name__) @add_end_docstrings( _a , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : Union[str, Any] , snake_case_ : GenericTensor ): if self.framework == "tf": snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": snake_case__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ) else: raise ValueError("""Unsupported framework""" ) return masked_index def lowerCamelCase ( self : Optional[Any] , snake_case_ : GenericTensor ): snake_case__ : List[Any] = self.get_masked_index(snake_case_ ) snake_case__ : List[Any] = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def lowerCamelCase ( self : Tuple , snake_case_ : GenericTensor ): if isinstance(snake_case_ , snake_case_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case_ ) def lowerCamelCase ( self : List[Any] , snake_case_ : Any , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ): if return_tensors is None: snake_case__ : Tuple = self.framework snake_case__ : Optional[Any] = self.tokenizer(snake_case_ , return_tensors=snake_case_ ) self.ensure_exactly_one_mask_token(snake_case_ ) return model_inputs def lowerCamelCase ( self : str , snake_case_ : str ): snake_case__ : Union[str, Any] = self.model(**snake_case_ ) snake_case__ : Dict = model_inputs["""input_ids"""] return model_outputs def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : str=5 , snake_case_ : List[Any]=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: snake_case__ : Any = target_ids.shape[0] snake_case__ : List[Any] = model_outputs["""input_ids"""][0] snake_case__ : Optional[Any] = model_outputs["""logits"""] if self.framework == "tf": snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] snake_case__ : Optional[int] = outputs.numpy() snake_case__ : Optional[int] = outputs[0, masked_index, :] snake_case__ : Optional[int] = stable_softmax(snake_case_ , axis=-1 ) if target_ids is not None: snake_case__ : Optional[Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) ) snake_case__ : Optional[int] = tf.expand_dims(snake_case_ , 0 ) snake_case__ : int = tf.math.top_k(snake_case_ , k=snake_case_ ) snake_case__ , snake_case__ : Any = topk.values.numpy(), topk.indices.numpy() else: snake_case__ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample snake_case__ : Tuple = outputs[0, masked_index, :] snake_case__ : Tuple = logits.softmax(dim=-1 ) if target_ids is not None: snake_case__ : List[str] = probs[..., target_ids] snake_case__ , snake_case__ : List[str] = probs.topk(snake_case_ ) snake_case__ : Tuple = [] snake_case__ : List[str] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): snake_case__ : Union[str, Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place snake_case__ : Dict = input_ids.numpy().copy() if target_ids is not None: snake_case__ : Any = target_ids[p].tolist() snake_case__ : Union[str, Any] = p # Filter padding out: snake_case__ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back snake_case__ : str = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) snake_case__ : Union[str, Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence} row.append(snake_case_ ) result.append(snake_case_ ) if single_mask: return result[0] return result def lowerCamelCase ( self : int , snake_case_ : Any , snake_case_ : str=None ): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Union[str, Any] = [targets] try: snake_case__ : Any = self.tokenizer.get_vocab() except Exception: snake_case__ : str = {} snake_case__ : List[Any] = [] for target in targets: snake_case__ : List[str] = vocab.get(snake_case_ , snake_case_ ) if id_ is None: snake_case__ : int = self.tokenizer( snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )["""input_ids"""] if len(snake_case_ ) == 0: logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " """We cannot replace it with anything meaningful, ignoring it""" ) continue snake_case__ : Optional[Any] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." ) target_ids.append(id_ ) snake_case__ : Optional[Any] = list(set(snake_case_ ) ) if len(snake_case_ ) == 0: raise ValueError("""At least one target must be provided when passed.""" ) snake_case__ : Dict = np.array(snake_case_ ) return target_ids def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Tuple=None , snake_case_ : Union[str, Any]=None ): snake_case__ : Union[str, Any] = {} if targets is not None: snake_case__ : List[str] = self.get_target_ids(snake_case_ , snake_case_ ) snake_case__ : Union[str, Any] = target_ids if top_k is not None: snake_case__ : Optional[int] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" ) return {}, {}, postprocess_params def __call__( self : List[str] , snake_case_ : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : List[Any] ): snake_case__ : Optional[int] = super().__call__(snake_case_ , **snake_case_ ) if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1: return outputs[0] return outputs
35
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 _snake_case = get_tests_dir("fixtures") class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = mock.Mock() _lowerCAmelCase : int = 500 _lowerCAmelCase : Tuple = {} _lowerCAmelCase : str = HTTPError _lowerCAmelCase : Union[str, Any] = {} # Download this model to make sure it's in the cache. _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=__a) as mock_head: _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json") def snake_case__ ( self): '''simple docstring''' with self.assertRaises(__a): # config is in subfolder, the following should not work without specifying the subfolder _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants") _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor") self.assertIsNotNone(__a) @is_staging_test class UpperCAmelCase_ ( unittest.TestCase): @classmethod def snake_case__ ( cls): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = TOKEN HfFolder.save_token(__a) @classmethod def snake_case__ ( cls): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="test-image-processor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-image-processor") except HTTPError: pass def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-image-processor", use_auth_token=self._token) _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token) _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=__a) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
36
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
'''simple docstring''' from copy import deepcopy class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> None: if arr is None and size is not None: lowerCAmelCase__ : List[str] = size lowerCAmelCase__ : str = [0] * size elif arr is not None: self.init(__UpperCAmelCase ) else: raise ValueError("""Either arr or size must be specified""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None: lowerCAmelCase__ : Any = len(__UpperCAmelCase ) lowerCAmelCase__ : Any = deepcopy(__UpperCAmelCase ) for i in range(1 ,self.size ): lowerCAmelCase__ : Optional[Any] = self.next_(__UpperCAmelCase ) if j < self.size: self.tree[j] += self.tree[i] def UpperCAmelCase_ ( self ) -> list[int]: lowerCAmelCase__ : Dict = self.tree[:] for i in range(self.size - 1 ,0 ,-1 ): lowerCAmelCase__ : Union[str, Any] = self.next_(__UpperCAmelCase ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> int: return index + (index & (-index)) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> int: return index - (index & (-index)) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None: if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value lowerCAmelCase__ : Optional[Any] = self.next_(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None: self.add(__UpperCAmelCase ,value - self.get(__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: if right == 0: return 0 lowerCAmelCase__ : List[Any] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] lowerCAmelCase__ : Union[str, Any] = self.prev(__UpperCAmelCase ) return result def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int: return self.prefix(__UpperCAmelCase ) - self.prefix(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.query(__UpperCAmelCase ,index + 1 ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: value -= self.tree[0] if value < 0: return -1 lowerCAmelCase__ : Any = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 lowerCAmelCase__ : int = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
37
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class _SCREAMING_SNAKE_CASE ( _a ): def _A ( self : int ): UpperCamelCase :List[Any] = tempfile.mkdtemp() UpperCamelCase :List[str] = 8 # DPR tok UpperCamelCase :Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , """dpr_tokenizer""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) UpperCamelCase :List[str] = os.path.join(__lowerCamelCase , DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok UpperCamelCase :List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase :Dict = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) UpperCamelCase :int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase :Union[str, Any] = {"""unk_token""": """<unk>"""} UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , """bart_tokenizer""" ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) UpperCamelCase :List[str] = os.path.join(__lowerCamelCase , BART_VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase :Union[str, Any] = os.path.join(__lowerCamelCase , BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCamelCase ) ) def _A ( self : List[Any] ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def _A ( self : Tuple ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def _A ( self : Union[str, Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) ) def _A ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def _A ( self : Tuple ): UpperCamelCase :Optional[int] = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _A ( self : Tuple ): UpperCamelCase :List[str] = self.get_dummy_dataset() UpperCamelCase :List[str] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: UpperCamelCase :List[Any] = dataset UpperCamelCase :Dict = RagRetriever( __lowerCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _A ( self : Optional[Any] , __lowerCamelCase : bool ): UpperCamelCase :Union[str, Any] = self.get_dummy_dataset() UpperCamelCase :int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , ) if from_disk: UpperCamelCase :Any = os.path.join(self.tmpdirname , """dataset""" ) UpperCamelCase :List[str] = os.path.join(self.tmpdirname , """index.faiss""" ) dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) ) dataset.drop_index("""embeddings""" ) dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) ) del dataset UpperCamelCase :Optional[Any] = RagRetriever( __lowerCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: UpperCamelCase :Any = RagRetriever( __lowerCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __lowerCamelCase ) , ) return retriever def _A ( self : Any ): UpperCamelCase :List[Any] = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase :Optional[Any] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" ) dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" ) pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) ) UpperCamelCase :Union[str, Any] = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" ) UpperCamelCase :Optional[int] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset} pickle.dump(__lowerCamelCase , open(__lowerCamelCase , """wb""" ) ) UpperCamelCase :Dict = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , ) UpperCamelCase :List[Any] = RagRetriever( __lowerCamelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _A ( self : Dict ): UpperCamelCase :Union[str, Any] = 1 UpperCamelCase :Dict = self.get_dummy_canonical_hf_index_retriever() UpperCamelCase :Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = retriever.retrieve(__lowerCamelCase , n_docs=__lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__lowerCamelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _A ( self : Optional[Any] ): UpperCamelCase :List[Any] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: UpperCamelCase :List[str] = self.get_dummy_dataset() retriever.save_pretrained(__lowerCamelCase ) UpperCamelCase :int = RagRetriever.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Dict = retriever.retrieve(__lowerCamelCase , n_docs=1 ) self.assertTrue(out is not None ) def _A ( self : Tuple ): UpperCamelCase :int = 1 UpperCamelCase :Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase , UpperCamelCase , UpperCamelCase :Union[str, Any] = retriever.retrieve(__lowerCamelCase , n_docs=__lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__lowerCamelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _A ( self : Optional[int] ): UpperCamelCase :Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__lowerCamelCase ) UpperCamelCase :str = RagRetriever.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Optional[int] = retriever.retrieve(__lowerCamelCase , n_docs=1 ) self.assertTrue(out is not None ) def _A ( self : Any ): UpperCamelCase :List[str] = 1 UpperCamelCase :Dict = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase , UpperCamelCase , UpperCamelCase :str = retriever.retrieve(__lowerCamelCase , n_docs=__lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__lowerCamelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _A ( self : List[Any] ): UpperCamelCase :List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__lowerCamelCase ) UpperCamelCase :Optional[int] = RagRetriever.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Any = retriever.retrieve(__lowerCamelCase , n_docs=1 ) self.assertTrue(out is not None ) def _A ( self : Optional[Any] ): UpperCamelCase :Any = 1 UpperCamelCase :List[Any] = self.get_dummy_legacy_index_retriever() UpperCamelCase :Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = retriever.retrieve(__lowerCamelCase , n_docs=__lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__lowerCamelCase ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""text"""] ) , __lowerCamelCase ) self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _A ( self : Optional[int] ): UpperCamelCase :Optional[int] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__lowerCamelCase ) UpperCamelCase :List[Any] = RagRetriever.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Tuple = retriever.retrieve(__lowerCamelCase , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _A ( self : Any ): import torch UpperCamelCase :List[Any] = 1 UpperCamelCase :List[Any] = self.get_dummy_canonical_hf_index_retriever() UpperCamelCase :int = [[5, 7], [10, 11]] UpperCamelCase :Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Optional[int] = retriever(__lowerCamelCase , __lowerCamelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCamelCase ) UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[int] = ( out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , np.ndarray ) UpperCamelCase :List[str] = retriever( __lowerCamelCase , __lowerCamelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCamelCase , return_tensors="""pt""" , ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[int] = ( # noqa: F841 out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], out["""doc_ids"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__lowerCamelCase , torch.Tensor ) self.assertIsInstance(__lowerCamelCase , torch.Tensor ) self.assertIsInstance(__lowerCamelCase , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _A ( self : List[str] ): UpperCamelCase :Dict = self.get_dpr_ctx_encoder_tokenizer() UpperCamelCase :Dict = 1 UpperCamelCase :List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCamelCase ) retriever.set_ctx_encoder_tokenizer(__lowerCamelCase ) UpperCamelCase :int = [[5, 7], [10, 11]] UpperCamelCase :List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCamelCase :Optional[Any] = retriever(__lowerCamelCase , __lowerCamelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCamelCase ) self.assertEqual( len(__lowerCamelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , __lowerCamelCase ) # check for doc token related keys in dictionary.
38
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _a = { '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig'''] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''ConvNextFeatureExtractor'''] _a = ['''ConvNextImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvNextForImageClassification''', '''ConvNextModel''', '''ConvNextPreTrainedModel''', '''ConvNextBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''TFConvNextForImageClassification''', '''TFConvNextModel''', '''TFConvNextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
39
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" from typing import List import numpy as np def lowercase ( A_ )-> int: '''simple docstring''' a : Union[str, Any] = {key: len(A_ ) for key, value in gen_kwargs.items() if isinstance(A_ , A_ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) a : str = max(lists_lengths.values() , default=0 ) return max(1 , A_ ) def lowercase ( A_ , A_ )-> List[range]: '''simple docstring''' a : Tuple = [] for group_idx in range(A_ ): a : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break a : str = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 a : Tuple = range(A_ , start + num_shards_to_add ) shards_indices_per_group.append(A_ ) return shards_indices_per_group def lowercase ( A_ , A_ )-> List[dict]: '''simple docstring''' a : Tuple = _number_of_shards_in_gen_kwargs(A_ ) if num_shards == 1: return [dict(A_ )] else: a : Tuple = _distribute_shards(num_shards=A_ , max_num_jobs=A_ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A_ , A_ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A_ ) ) ] def lowercase ( A_ )-> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , A_ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowercase ( A_ , A_ )-> dict: '''simple docstring''' a : Union[str, Any] = {len(A_ ) for value in gen_kwargs.values() if isinstance(A_ , A_ )} a : Optional[int] = {} for size in list_sizes: a : Optional[Any] = list(range(A_ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes a : Union[str, Any] = dict(A_ ) for key, value in shuffled_kwargs.items(): if isinstance(A_ , A_ ): a : int = [value[i] for i in indices_per_size[len(A_ )]] return shuffled_kwargs
40
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
'''simple docstring''' import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: # Initialise PyTorch model lowerCamelCase__ : Optional[Any] = RemBertConfig.from_json_file(UpperCamelCase ) print("""Building PyTorch model from configuration: {}""".format(str(UpperCamelCase ) ) ) lowerCamelCase__ : Optional[Any] = RemBertModel(UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_rembert(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model print("""Save PyTorch model to {}""".format(UpperCamelCase ) ) torch.save(model.state_dict() , UpperCamelCase ) if __name__ == "__main__": _A : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--rembert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained RemBERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _A : Union[str, Any] =parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
41
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase : List[str] = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Tuple = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : str = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' _validate_point(SCREAMING_SNAKE_CASE ) _validate_point(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): raise ValueError('''Both points must be in the same n-dimensional space''' ) return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if point: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): for item in point: if not isinstance(SCREAMING_SNAKE_CASE , (int, float) ): __UpperCamelCase :Optional[int] = ( '''Expected a list of numbers as input, found ''' f"""{type(SCREAMING_SNAKE_CASE ).__name__}""" ) raise TypeError(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :List[str] = f"""Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE ).__name__}""" raise TypeError(SCREAMING_SNAKE_CASE ) else: raise ValueError('''Missing an input''' ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' _validate_point(SCREAMING_SNAKE_CASE ) _validate_point(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ): raise ValueError('''Both points must be in the same n-dimensional space''' ) return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
43
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig _a : str = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } _a : Optional[int] = logging.get_logger(__name__) class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Any = "maskformer" _UpperCamelCase : Optional[Any] = {"hidden_size": "mask_feature_size"} _UpperCamelCase : Any = ["resnet", "swin"] _UpperCamelCase : Dict = ["detr"] def __init__( self , a__ = 256 , a__ = 256 , a__ = 0.1 , a__ = False , a__ = None , a__ = None , a__ = 0.0_2 , a__ = 1.0 , a__ = 1.0 , a__ = 1.0 , a__ = 2_0.0 , a__ = None , **a__ , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _lowerCAmelCase : Union[str, Any] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(a__ , a__ ): _lowerCAmelCase : str = backbone_config.pop("""model_type""" ) _lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase : Dict = config_class.from_dict(a__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. " F"Supported model types: {','.join(self.backbones_supported )}" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _lowerCAmelCase : int = DetrConfig() else: # verify that the decoder is supported _lowerCAmelCase : Union[str, Any] = ( decoder_config.pop("""model_type""" ) if isinstance(a__ , a__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F"Transformer Decoder {decoder_type} not supported, please use one of" F" {','.join(self.decoders_supported )}" ) if isinstance(a__ , a__ ): _lowerCAmelCase : int = CONFIG_MAPPING[decoder_type] _lowerCAmelCase : Tuple = config_class.from_dict(a__ ) _lowerCAmelCase : List[Any] = backbone_config _lowerCAmelCase : List[str] = decoder_config # main feature dimension for the model _lowerCAmelCase : int = fpn_feature_size _lowerCAmelCase : Tuple = mask_feature_size # initializer _lowerCAmelCase : Any = init_std _lowerCAmelCase : Any = init_xavier_std # Hungarian matcher && loss _lowerCAmelCase : Dict = cross_entropy_weight _lowerCAmelCase : Union[str, Any] = dice_weight _lowerCAmelCase : Tuple = mask_weight _lowerCAmelCase : int = use_auxiliary_loss _lowerCAmelCase : List[Any] = no_object_weight _lowerCAmelCase : int = output_auxiliary_logits _lowerCAmelCase : str = self.decoder_config.encoder_attention_heads _lowerCAmelCase : int = self.decoder_config.num_hidden_layers super().__init__(**a__ ) @classmethod def __A ( cls , a__ , a__ , **a__ ): return cls( backbone_config=a__ , decoder_config=a__ , **a__ , ) def __A ( self ): _lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__ ) _lowerCAmelCase : str = self.backbone_config.to_dict() _lowerCAmelCase : Dict = self.decoder_config.to_dict() _lowerCAmelCase : List[Any] = self.__class__.model_type return output
44
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : List[str] __UpperCAmelCase : Optional[str] = None # Automatically constructed __UpperCAmelCase : ClassVar[str] = "dict" __UpperCAmelCase : ClassVar[Any] = None __UpperCAmelCase : str = field(default='Translation' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ): return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __UpperCAmelCase ( self ): from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : Optional[List] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[str] = None # Automatically constructed __UpperCAmelCase : ClassVar[str] = "dict" __UpperCAmelCase : ClassVar[Any] = None __UpperCAmelCase : str = field(default='TranslationVariableLanguages' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self ): __a = sorted(set(self.languages ) ) if self.languages else None __a = len(self.languages ) if self.languages else None def __call__( self ): return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def __UpperCAmelCase ( self , _a ): __a = set(self.languages ) if self.languages and set(_a ) - lang_set: raise ValueError( f'''Some languages in example ({', '.join(sorted(set(_a ) - lang_set ) )}) are not in valid set ({', '.join(_a )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. __a = [] for lang, text in translation_dict.items(): if isinstance(_a , _a ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. __a , __a = zip(*sorted(_a ) ) return {"language": languages, "translation": translations} def __UpperCAmelCase ( self ): from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
45
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCAmelCase : Optional[int] = 5_0000 _UpperCAmelCase : Dict = 5000 _UpperCAmelCase , _UpperCAmelCase : Optional[int] = os.path.split(__file__) _UpperCAmelCase : List[str] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES} snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) snake_case_ = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) snake_case_ = generate_example_dataset( os.path.join(UpperCamelCase__ , 'dataset.arrow' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func(UpperCamelCase__ , **UpperCamelCase__ ) print('shuffling dataset' ) snake_case_ = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func( UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , 'wb' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
285
0
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "microsoft/conditional-detr-resnet-50": ( "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" ), } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'conditional_detr' _SCREAMING_SNAKE_CASE = ['past_key_values'] _SCREAMING_SNAKE_CASE = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , lowercase=True , lowercase=None , lowercase=3 , lowercase=300 , lowercase=6 , lowercase=2_048 , lowercase=8 , lowercase=6 , lowercase=2_048 , lowercase=8 , lowercase=0.0 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , lowercase=False , lowercase="sine" , lowercase="resnet50" , lowercase=True , lowercase=False , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=1 , lowercase=1 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.25 , **lowercase , ) -> str: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(lowercase , lowercase ): lowerCAmelCase = backbone_config.get("""model_type""" ) lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase = config_class.from_dict(lowercase ) lowerCAmelCase = use_timm_backbone lowerCAmelCase = backbone_config lowerCAmelCase = num_channels lowerCAmelCase = num_queries lowerCAmelCase = d_model lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = encoder_layers lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = init_xavier_std lowerCAmelCase = encoder_layerdrop lowerCAmelCase = decoder_layerdrop lowerCAmelCase = encoder_layers lowerCAmelCase = auxiliary_loss lowerCAmelCase = position_embedding_type lowerCAmelCase = backbone lowerCAmelCase = use_pretrained_backbone lowerCAmelCase = dilation # Hungarian matcher lowerCAmelCase = class_cost lowerCAmelCase = bbox_cost lowerCAmelCase = giou_cost # Loss coefficients lowerCAmelCase = mask_loss_coefficient lowerCAmelCase = dice_loss_coefficient lowerCAmelCase = cls_loss_coefficient lowerCAmelCase = bbox_loss_coefficient lowerCAmelCase = giou_loss_coefficient lowerCAmelCase = focal_alpha super().__init__(is_encoder_decoder=lowercase , **lowercase ) @property def _snake_case ( self ) -> int: return self.encoder_attention_heads @property def _snake_case ( self ) -> int: return self.d_model def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCAmelCase = self.backbone_config.to_dict() lowerCAmelCase = self.__class__.model_type return output class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = version.parse('1.11' ) @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _snake_case ( self ) -> float: return 1e-5 @property def _snake_case ( self ) -> int: return 12
46
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self : List[Any] , _a : List[str] , _a : Optional[Any]=2 , _a : List[Any]=8 , _a : Tuple=True , _a : Dict=True , _a : Any=True , _a : int=True , _a : List[str]=99 , _a : int=16 , _a : Union[str, Any]=5 , _a : Optional[int]=2 , _a : Optional[Any]=36 , _a : List[str]="gelu" , _a : Any=0.0 , _a : str=0.0 , _a : Optional[Any]=512 , _a : Tuple=16 , _a : Optional[int]=2 , _a : int=0.02 , _a : int=3 , _a : Optional[Any]=4 , _a : Dict=None , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =seq_length _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =use_input_mask _SCREAMING_SNAKE_CASE =use_token_type_ids _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =vocab_size _SCREAMING_SNAKE_CASE =hidden_size _SCREAMING_SNAKE_CASE =num_hidden_layers _SCREAMING_SNAKE_CASE =num_attention_heads _SCREAMING_SNAKE_CASE =intermediate_size _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =hidden_dropout_prob _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob _SCREAMING_SNAKE_CASE =max_position_embeddings _SCREAMING_SNAKE_CASE =type_vocab_size _SCREAMING_SNAKE_CASE =type_sequence_label_size _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =num_labels _SCREAMING_SNAKE_CASE =num_choices _SCREAMING_SNAKE_CASE =scope def A ( self : Any ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE =None if self.use_input_mask: _SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE =None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None if self.use_labels: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> Tuple: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def A ( self : Dict ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.get_config() _SCREAMING_SNAKE_CASE =300 return config def A ( self : str ) -> List[str]: '''simple docstring''' ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) =self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Any , _a : List[Any] , _a : List[Any] , _a : Union[str, Any] , _a : str , _a : List[Any] , _a : Optional[Any] , _a : List[str] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a ) _SCREAMING_SNAKE_CASE =model(_a , token_type_ids=_a ) _SCREAMING_SNAKE_CASE =model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , _a : List[Any] , _a : List[str] , _a : List[str] , _a : Dict , _a : Optional[Any] , _a : Any , _a : List[Any] , _a : Union[str, Any] , _a : List[str] , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =MraModel(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , ) _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , _a : Optional[Any] , _a : Dict , _a : Optional[int] , _a : str , _a : Optional[int] , _a : Optional[Any] , _a : Tuple ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[str] , _a : Optional[int] , _a : List[Any] , _a : Union[str, Any] , _a : Union[str, Any] , _a : Optional[int] , _a : Dict , _a : List[str] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[str] , _a : Optional[int] , _a : int , _a : Any , _a : str , _a : str , _a : Optional[int] , _a : Dict ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MraForSequenceClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Optional[Any] , _a : List[Any] , _a : List[str] , _a : Any , _a : List[Any] , _a : List[str] , _a : int , _a : Dict ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MraForTokenClassification(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : List[str] , _a : int , _a : Optional[int] , _a : int , _a : Dict , _a : Union[str, Any] , _a : Optional[int] , _a : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_choices _SCREAMING_SNAKE_CASE =MraForMultipleChoice(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Tuple ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) =config_and_inputs _SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( A__ , unittest.TestCase ): A__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) A__ = False A__ = False A__ = False A__ = False A__ = () def A ( self : Dict ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModelTester(self ) _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 ) def A ( self : Tuple ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Dict ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def A ( self : List[str] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE =type self.model_tester.create_and_check_model(*_a ) def A ( self : str ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def A ( self : List[str] ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def A ( self : Optional[int] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def A ( self : List[str] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def A ( self : int ) -> List[Any]: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE =MraModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : Optional[int] ) -> Tuple: '''simple docstring''' return @require_torch class A__ ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModel.from_pretrained('uw-madison/mra-base-512-4' ) _SCREAMING_SNAKE_CASE =torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) _SCREAMING_SNAKE_CASE =torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =5_0265 _SCREAMING_SNAKE_CASE =torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : List[Any] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) _SCREAMING_SNAKE_CASE =torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =5_0265 _SCREAMING_SNAKE_CASE =torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
47
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') SCREAMING_SNAKE_CASE__ : Dict = {'target_lang': 'fi', 'source_lang': 'en'} SCREAMING_SNAKE_CASE__ : Any = '>>zh<<' SCREAMING_SNAKE_CASE__ : int = 'Helsinki-NLP/' if is_torch_available(): SCREAMING_SNAKE_CASE__ : Optional[int] = 'pt' elif is_tf_available(): SCREAMING_SNAKE_CASE__ : str = 'tf' else: SCREAMING_SNAKE_CASE__ : Dict = 'jax' @require_sentencepiece class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Any = MarianTokenizer lowerCamelCase_ : Tuple = False lowerCamelCase_ : List[str] = True def _lowercase ( self ) -> Union[str, Any]: super().setUp() lowerCamelCase : Tuple = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] lowerCamelCase : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase : int = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["target_spm"] ) lowerCamelCase : Optional[int] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self , **UpperCamelCase__ ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]: return ( "This is a test", "This is a test", ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = "</s>" lowerCamelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def _lowercase ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _lowercase ( self ) -> str: lowerCamelCase : Any = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase : Union[str, Any] = en_de_tokenizer(["I am a small frog"] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("*" )] self.assertIn("source.spm" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = self.get_tokenizer() lowerCamelCase : Optional[Any] = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[Any] = self.get_tokenizer() lowerCamelCase : Any = tok(["I am a tiny frog", "I am a small frog"] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _lowercase ( self ) -> List[str]: # fmt: off lowerCamelCase : Dict = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[int] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) lowerCamelCase : int = "Tämä on testi" lowerCamelCase : List[str] = "This is a test" lowerCamelCase : Optional[int] = [76, 7, 2047, 2] lowerCamelCase : List[str] = [69, 12, 11, 940, 2] lowerCamelCase : Optional[int] = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
48
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case :Dict = logging.get_logger(__name__) __snake_case :Tuple = { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''', } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Dict = '''mvp''' UpperCamelCase__ : Optional[int] = ['''past_key_values'''] UpperCamelCase__ : int = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=50_267 , __SCREAMING_SNAKE_CASE : Any=1_024 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Dict=4_096 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Any=4_096 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=1_024 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=100 , __SCREAMING_SNAKE_CASE : List[str]=800 , **__SCREAMING_SNAKE_CASE : Tuple , ): '''simple docstring''' __a = vocab_size __a = max_position_embeddings __a = d_model __a = encoder_ffn_dim __a = encoder_layers __a = encoder_attention_heads __a = decoder_ffn_dim __a = decoder_layers __a = decoder_attention_heads __a = dropout __a = attention_dropout __a = activation_dropout __a = activation_function __a = init_std __a = encoder_layerdrop __a = decoder_layerdrop __a = classifier_dropout __a = use_cache __a = encoder_layers __a = scale_embedding # scale factor will be sqrt(d_model) if True __a = use_prompt __a = prompt_length __a = prompt_mid_dim super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __SCREAMING_SNAKE_CASE): __a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''')
49
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
from __future__ import annotations def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = None ) -> list[list[str]]: lowerCamelCase__ : int = word_bank or [] # create a table lowerCamelCase__ : int = len(_UpperCAmelCase ) + 1 lowerCamelCase__ : list[list[list[str]]] = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCamelCase__ : str = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCamelCase__ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""])) print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""])) print( all_construct( """hexagonosaurus""", ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""], ) )
50
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : Tuple = PriorTransformer UpperCAmelCase__ : Tuple = '''hidden_states''' @property def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = 4 UpperCAmelCase_ = 8 UpperCAmelCase_ = 7 UpperCAmelCase_ = floats_tensor((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = floats_tensor((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(_snake_case) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCamelCase ( self : int , _snake_case : List[str]=0): """simple docstring""" torch.manual_seed(_snake_case) UpperCAmelCase_ = 4 UpperCAmelCase_ = 8 UpperCAmelCase_ = 7 UpperCAmelCase_ = torch.randn((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = torch.randn((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = torch.randn((batch_size, num_embeddings, embedding_dim)).to(_snake_case) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowerCamelCase ( self : List[Any]): """simple docstring""" return (4, 8) @property def lowerCamelCase ( self : str): """simple docstring""" return (4, 8) def lowerCamelCase ( self : List[Any]): """simple docstring""" UpperCAmelCase_ = { '''num_attention_heads''': 2, '''attention_head_dim''': 4, '''num_layers''': 2, '''embedding_dim''': 8, '''num_embeddings''': 7, '''additional_embeddings''': 4, } UpperCAmelCase_ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase ( self : int): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = PriorTransformer.from_pretrained( '''hf-internal-testing/prior-dummy''' , output_loading_info=_snake_case) self.assertIsNotNone(_snake_case) self.assertEqual(len(loading_info['''missing_keys''']) , 0) model.to(_snake_case) UpperCAmelCase_ = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def lowerCamelCase ( self : List[str]): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase_ = self.model_class(**_snake_case) UpperCAmelCase_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''hidden_states''', '''timestep'''] self.assertListEqual(arg_names[:2] , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''') UpperCAmelCase_ = model.to(_snake_case) if hasattr(_snake_case , '''set_default_attn_processor'''): model.set_default_attn_processor() UpperCAmelCase_ = self.get_dummy_seed_input() with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case)[0] UpperCAmelCase_ = output[0, :5].flatten().cpu() print(_snake_case) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. UpperCAmelCase_ = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9]) self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1e-2)) @slow class __snake_case ( unittest.TestCase ): def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[Any]=1 , _snake_case : Any=768 , _snake_case : Optional[Any]=77 , _snake_case : Optional[int]=0): """simple docstring""" torch.manual_seed(_snake_case) UpperCAmelCase_ = batch_size UpperCAmelCase_ = embedding_dim UpperCAmelCase_ = num_embeddings UpperCAmelCase_ = torch.randn((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = torch.randn((batch_size, embedding_dim)).to(_snake_case) UpperCAmelCase_ = torch.randn((batch_size, num_embeddings, embedding_dim)).to(_snake_case) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCamelCase ( self : str): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]], [37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]], # fmt: on ]) def lowerCamelCase ( self : List[str] , _snake_case : List[Any] , _snake_case : List[Any]): """simple docstring""" UpperCAmelCase_ = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''') model.to(_snake_case) UpperCAmelCase_ = self.get_dummy_seed_input(seed=_snake_case) with torch.no_grad(): UpperCAmelCase_ = model(**_snake_case)[0] assert list(sample.shape) == [1, 768] UpperCAmelCase_ = sample[0, :8].flatten().cpu() print(_snake_case) UpperCAmelCase_ = torch.tensor(_snake_case) assert torch_all_close(_snake_case , _snake_case , atol=1e-3)
51
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCamelCase : int = logging.get_logger(__name__) __lowerCamelCase : int = {"""vocab_file""": """spiece.model"""} __lowerCamelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } __lowerCamelCase : Optional[int] = { """AI-Sweden/gpt-sw3-126m""": 2048, """AI-Sweden/gpt-sw3-350m""": 2048, """AI-Sweden/gpt-sw3-1.6b""": 2048, """AI-Sweden/gpt-sw3-6.7b""": 2048, """AI-Sweden/gpt-sw3-20b""": 2048, } class A__ ( __snake_case ): _UpperCAmelCase :str = VOCAB_FILES_NAMES _UpperCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :List[str] = ['input_ids', 'attention_mask'] def __init__( self , A_ , A_=False , A_=False , A_=False , A_=None , A_=None , A_=None , A_=None , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs UpperCamelCase : str = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) UpperCamelCase : int = "None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing UpperCamelCase : Union[str, Any] = "<|endoftext|>" if eos_token is None else eos_token UpperCamelCase : List[Any] = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: UpperCamelCase : Dict = unk_token if pad_token is None else pad_token UpperCamelCase : List[str] = eos_token if bos_token is None else bos_token else: UpperCamelCase : List[Any] = "<pad>" if pad_token is None else pad_token UpperCamelCase : Dict = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) UpperCamelCase : List[str] = do_lower_case UpperCamelCase : List[str] = remove_space UpperCamelCase : Any = keep_accents UpperCamelCase : str = vocab_file UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) # Used for whitespace normalization in input texts # fmt : off UpperCamelCase : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing UpperCamelCase : Any = re.compile( F"""[{"".join(map(A_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ): '''simple docstring''' UpperCamelCase : Optional[int] = self.__dict__.copy() UpperCamelCase : int = None return state def __setstate__( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCamelCase : List[Any] = {} UpperCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __UpperCamelCase( self ): '''simple docstring''' return len(self.sp_model ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = self.non_printing_characters_re.sub("" , A_ ) # Normalize whitespaces UpperCamelCase : Tuple = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization UpperCamelCase : List[str] = unicodedata.normalize("NFC" , A_ ) return text def __UpperCamelCase( self , A_ , **A_ ): '''simple docstring''' UpperCamelCase : List[Any] = self.preprocess_text(A_ ) return self.sp_model.encode(A_ , out_type=A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.sp_model.PieceToId(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.sp_model.IdToPiece(A_ ) @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' return out_string def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Any = [] UpperCamelCase : Optional[int] = "" UpperCamelCase : Optional[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token UpperCamelCase : Optional[int] = True UpperCamelCase : List[str] = [] else: current_sub_tokens.append(A_ ) UpperCamelCase : Dict = False out_string += self.sp_model.decode(A_ ) return out_string def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if not os.path.isdir(A_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase : Tuple = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , "wb" ) as fi: UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def __UpperCamelCase( self , A_ , A_ = False ): '''simple docstring''' if isinstance(A_ , A_ ): UpperCamelCase : int = self.preprocess_text(A_ ) UpperCamelCase : List[str] = self.sp_model.encode(A_ ) else: UpperCamelCase : Optional[int] = [self.preprocess_text(A_ ) for t in text] UpperCamelCase : Union[str, Any] = self.sp_model.encode(A_ ) if return_tensors is True or return_tensors == "pt": UpperCamelCase : int = torch.tensor(A_ ) return token_ids def __UpperCamelCase( self , A_ ): '''simple docstring''' return self.sp_model.decode(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] UpperCamelCase : str = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(A_ ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=A_ )
52
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionImg2ImgPipeline` instead.''' )
53
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 'README.md' ) )
285
0
"""simple docstring""" import math def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ = 1 / 1_2345 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 3 while True: __SCREAMING_SNAKE_CASE = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = int(lowerCAmelCase_ ) total_partitions += 1 if check_partition_perfect(lowerCAmelCase_ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(lowerCAmelCase_ ) integer += 1 if __name__ == "__main__": print(F"{solution() = }")
54
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging a_ : Any = logging.get_logger(__name__) a_ : Optional[Any] = {"""vocab_file""": """spiece.model"""} a_ : Tuple = { """vocab_file""": { """TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""", } } class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="<unk>" , UpperCamelCase="<sep>" , UpperCamelCase="<pad>" , UpperCamelCase="<cls>" , UpperCamelCase="<mask>" , UpperCamelCase=["<eop>", "<eod>"] , UpperCamelCase = None , **UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , ) lowerCamelCase_ = 3 lowerCamelCase_ = do_lower_case lowerCamelCase_ = remove_space lowerCamelCase_ = keep_accents lowerCamelCase_ = vocab_file lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " "See https://pypi.org/project/jieba/ for installation." ) lowerCamelCase_ = jieba lowerCamelCase_ = str.maketrans(" \n" , "\u2582\u2583" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def snake_case ( self ): """simple docstring""" return len(self.sp_model ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ = self.__dict__.copy() lowerCamelCase_ = None return state def __setstate__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase_ = {} lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if self.remove_space: lowerCamelCase_ = " ".join(inputs.strip().split() ) else: lowerCamelCase_ = inputs lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase_ = unicodedata.normalize("NFKD" , UpperCamelCase ) lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] ) if self.do_lower_case: lowerCamelCase_ = outputs.lower() return outputs def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.preprocess_text(UpperCamelCase ) lowerCamelCase_ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) lowerCamelCase_ = [] for piece in pieces: if len(UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase_ = cur_pieces[1:] else: lowerCamelCase_ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase ) else: new_pieces.append(UpperCamelCase ) return new_pieces def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.sp_model.PieceToId(UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" return self.sp_model.IdToPiece(UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "".join(UpperCamelCase ).replace(UpperCamelCase , " " ).strip() return out_string def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def snake_case ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is not None: return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1] return ([0] * len(UpperCamelCase )) + [1, 1] def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def snake_case ( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ = os.path.join( UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , "wb" ) as fi: lowerCamelCase_ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = super()._decode(*UpperCamelCase , **UpperCamelCase ) lowerCamelCase_ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" ) return text
55
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
'''simple docstring''' import qiskit def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> qiskit.result.counts.Counts: '''simple docstring''' snake_case_ = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register snake_case_ = qiskit.QuantumCircuit(__UpperCAmelCase, __UpperCAmelCase ) # Map the quantum measurement to the classical bits circuit.measure([0], [0] ) # Execute the circuit on the simulator snake_case_ = qiskit.execute(__UpperCAmelCase, __UpperCAmelCase, shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(__UpperCAmelCase ) if __name__ == "__main__": print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
56
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging A : str = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): super().__init__() if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1: __lowerCAmelCase = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" , "1.0.0" , __a , standard_warn=__a ) __lowerCAmelCase = dict(scheduler.config ) __lowerCAmelCase = 1 __lowerCAmelCase = FrozenDict(__a ) if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: __lowerCAmelCase = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration" " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" , "1.0.0" , __a , standard_warn=__a ) __lowerCAmelCase = dict(scheduler.config ) __lowerCAmelCase = True __lowerCAmelCase = FrozenDict(__a ) if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=__a , segmentation_processor=__a , vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , ) def snake_case ( self , __a = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowerCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__a ) def snake_case ( self ): self.enable_attention_slicing(__a ) def snake_case ( self ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) __lowerCAmelCase = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case ( self ): if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , __a , __a , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ): __lowerCAmelCase = self.segmentation_processor( text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device ) __lowerCAmelCase = self.segmentation_model(**__a ) __lowerCAmelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() __lowerCAmelCase = self.numpy_to_pil(__a )[0].resize(image.size ) # Run inpainting pipeline with the generated mask __lowerCAmelCase = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=__a , image=__a , mask_image=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , )
57
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class a_ ( snake_case_ ): '''simple docstring''' def __init__( self ) -> List[str]: _SCREAMING_SNAKE_CASE = [] def snake_case_( self , A , A , A , **A ) -> Tuple: self.events.append("""on_init_end""" ) def snake_case_( self , A , A , A , **A ) -> Optional[int]: self.events.append("""on_train_begin""" ) def snake_case_( self , A , A , A , **A ) -> Any: self.events.append("""on_train_end""" ) def snake_case_( self , A , A , A , **A ) -> Tuple: self.events.append("""on_epoch_begin""" ) def snake_case_( self , A , A , A , **A ) -> Tuple: self.events.append("""on_epoch_end""" ) def snake_case_( self , A , A , A , **A ) -> str: self.events.append("""on_step_begin""" ) def snake_case_( self , A , A , A , **A ) -> Dict: self.events.append("""on_step_end""" ) def snake_case_( self , A , A , A , **A ) -> List[Any]: self.events.append("""on_evaluate""" ) def snake_case_( self , A , A , A , **A ) -> Optional[Any]: self.events.append("""on_predict""" ) def snake_case_( self , A , A , A , **A ) -> Optional[Any]: self.events.append("""on_save""" ) def snake_case_( self , A , A , A , **A ) -> int: self.events.append("""on_log""" ) def snake_case_( self , A , A , A , **A ) -> Union[str, Any]: self.events.append("""on_prediction_step""" ) @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' def snake_case_( self ) -> Any: _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() def snake_case_( self ) -> List[str]: shutil.rmtree(self.output_dir ) def snake_case_( self , A=0 , A=0 , A=64 , A=64 , A=None , A=False , **A ) -> Dict: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. _SCREAMING_SNAKE_CASE = RegressionDataset(length=A ) _SCREAMING_SNAKE_CASE = RegressionDataset(length=A ) _SCREAMING_SNAKE_CASE = RegressionModelConfig(a=A , b=A ) _SCREAMING_SNAKE_CASE = RegressionPreTrainedModel(A ) _SCREAMING_SNAKE_CASE = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A ) return Trainer( A , A , train_dataset=A , eval_dataset=A , callbacks=A , ) def snake_case_( self , A , A ) -> Tuple: self.assertEqual(len(A ) , len(A ) ) # Order doesn't matter _SCREAMING_SNAKE_CASE = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) _SCREAMING_SNAKE_CASE = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ ) for cba, cba in zip(A , A ): if isinstance(A , A ) and isinstance(A , A ): self.assertEqual(A , A ) elif isinstance(A , A ) and not isinstance(A , A ): self.assertEqual(A , cba.__class__ ) elif not isinstance(A , A ) and isinstance(A , A ): self.assertEqual(cba.__class__ , A ) else: self.assertEqual(A , A ) def snake_case_( self , A ) -> Optional[Any]: _SCREAMING_SNAKE_CASE = ["""on_init_end""", """on_train_begin"""] _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = len(trainer.get_eval_dataloader() ) _SCREAMING_SNAKE_CASE = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(A ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def snake_case_( self ) -> Tuple: _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # Callbacks passed at init are added to the default callbacks _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _SCREAMING_SNAKE_CASE = self.get_trainer(disable_tqdm=A ) _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _SCREAMING_SNAKE_CASE = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.pop_callback(A ) self.assertEqual(cb.__class__ , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) # We can also add, pop, or remove by instance _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[0] trainer.remove_callback(A ) expected_callbacks.remove(A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[0] _SCREAMING_SNAKE_CASE = trainer.pop_callback(A ) self.assertEqual(A , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) trainer.add_callback(A ) expected_callbacks.insert(0 , A ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A ) def snake_case_( self ) -> Tuple: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=A ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # Independent log/save/eval _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # A bit of everything _SCREAMING_SNAKE_CASE = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(A , self.get_expected_events(A ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: _SCREAMING_SNAKE_CASE = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A ) in warn_mock.call_args[0][0]
58
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class UpperCAmelCase ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> List[str]: '''simple docstring''' return f"""gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy""" def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int: '''simple docstring''' super().tearDown() gc.collect() def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[Any]=0 , snake_case__ : Any=(4, 4, 64, 64) , snake_case__ : List[Any]=False ) -> int: '''simple docstring''' snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa snake_case : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ ) return image def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ) -> List[Any]: '''simple docstring''' snake_case : List[str] = jnp.bfloataa if fpaa else jnp.floataa snake_case : str = "bf16" if fpaa else None snake_case , snake_case : Optional[int] = FlaxUNetaDConditionModel.from_pretrained( snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__ ) return model, params def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=(4, 77, 7_68) , snake_case__ : Dict=False ) -> List[str]: '''simple docstring''' snake_case : Any = jnp.bfloataa if fpaa else jnp.floataa snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ) -> List[str]: '''simple docstring''' snake_case , snake_case : List[str] = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ ) snake_case : Union[str, Any] = self.get_latents(snake_case__ , fpaa=snake_case__ ) snake_case : List[str] = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ ) snake_case : Dict = model.apply( {"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample assert sample.shape == latents.shape snake_case : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case : Optional[int] = jnp.array(snake_case__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple ) -> str: '''simple docstring''' snake_case , snake_case : List[Any] = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ ) snake_case : List[str] = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ ) snake_case : Union[str, Any] = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 10_24) , fpaa=snake_case__ ) snake_case : Optional[int] = model.apply( {"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample assert sample.shape == latents.shape snake_case : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case : Dict = jnp.array(snake_case__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
59
from PIL import Image def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = (259 * (level + 255)) / (255 * (259 - level)) def contrast(UpperCamelCase__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(UpperCamelCase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 _UpperCAmelCase : Tuple = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
285
0
"""simple docstring""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging snake_case__ : int = logging.get_logger(__name__) def _snake_case ( _snake_case : Optional[Any] ): lowerCAmelCase : str = r'''\w+[.]\d+''' lowerCAmelCase : List[Any] = re.findall(_snake_case , _snake_case ) for pat in pats: lowerCAmelCase : Union[str, Any] = key.replace(_snake_case , '''_'''.join(pat.split('''.''' ) ) ) return key def _snake_case ( _snake_case : List[str] , _snake_case : Any , _snake_case : Tuple ): lowerCAmelCase : Dict = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase : int = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase : Dict = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowerCAmelCase : Dict = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase : Any = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase : List[str] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase : int = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase : Any = flax_model.init_weights(PRNGKey(_snake_case ) ) lowerCAmelCase : Optional[int] = flatten_dict(_snake_case ) lowerCAmelCase : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase : Dict = rename_key(_snake_case ) lowerCAmelCase : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowerCAmelCase, lowerCAmelCase : Any = rename_key_and_reshape_tensor(_snake_case , _snake_case , _snake_case ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowerCAmelCase : Dict = jnp.asarray(_snake_case ) return unflatten_dict(_snake_case )
60
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) # General docstring _UpperCAmelCase : Dict = """ResNetConfig""" # Base docstring _UpperCAmelCase : Optional[int] = """microsoft/resnet-50""" _UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7] # Image classification docstring _UpperCAmelCase : Tuple = """microsoft/resnet-50""" _UpperCAmelCase : int = """tiger cat""" _UpperCAmelCase : Optional[Any] = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = nn.Convad( snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity() def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) snake_case_ = config.num_channels def a ( self , snake_case ): snake_case_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.pooler(snake_case ) return embedding class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 2 ): super().__init__() snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case ) snake_case_ = nn.BatchNormad(snake_case ) def a ( self , snake_case ): snake_case_ = self.convolution(snake_case ) snake_case_ = self.normalization(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ): super().__init__() snake_case_ = in_channels != out_channels or stride != 1 snake_case_ = out_channels // reduction snake_case_ = ( ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity() ) snake_case_ = nn.Sequential( ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , ) snake_case_ = ACTaFN[activation] def a ( self , snake_case ): snake_case_ = hidden_state snake_case_ = self.layer(snake_case ) snake_case_ = self.shortcut(snake_case ) hidden_state += residual snake_case_ = self.activation(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ): super().__init__() snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer snake_case_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def a ( self , snake_case ): snake_case_ = input for layer in self.layers: snake_case_ = layer(snake_case ) return hidden_state class lowercase ( nn.Module ): def __init__( self , snake_case ): super().__init__() snake_case_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ): self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) ) def a ( self , snake_case , snake_case = False , snake_case = True ): snake_case_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) snake_case_ = stage_module(snake_case ) if output_hidden_states: snake_case_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=snake_case , hidden_states=snake_case , ) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : List[str] = ResNetConfig __SCREAMING_SNAKE_CASE : Any = '''resnet''' __SCREAMING_SNAKE_CASE : int = '''pixel_values''' __SCREAMING_SNAKE_CASE : Tuple = True def a ( self , snake_case ): if isinstance(snake_case , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a ( self , snake_case , snake_case=False ): if isinstance(snake_case , snake_case ): snake_case_ = value _UpperCAmelCase : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase : Optional[int] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) snake_case_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder( snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = encoder_outputs[0] snake_case_ = self.pooler(snake_case ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , lowercase_ , ) class lowercase ( lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) snake_case_ = config.num_labels snake_case_ = ResNetModel(snake_case ) # classification head snake_case_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.pooler_output if return_dict else outputs[1] snake_case_ = self.classifier(snake_case ) snake_case_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case_ = 'single_label_classification' else: snake_case_ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case_ = MSELoss() if self.num_labels == 1: snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case_ = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": snake_case_ = CrossEntropyLoss() snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case_ = BCEWithLogitsLoss() snake_case_ = loss_fct(snake_case , snake_case ) if not return_dict: snake_case_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , lowercase_ , ) class lowercase ( lowercase_ , lowercase_ ): def __init__( self , snake_case ): super().__init__(snake_case ) super()._init_backbone(snake_case ) snake_case_ = [config.embedding_size] + config.hidden_sizes snake_case_ = ResNetEmbeddings(snake_case ) snake_case_ = ResNetEncoder(snake_case ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC ) def a ( self , snake_case , snake_case = None , snake_case = None ): snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = self.embedder(snake_case ) snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) snake_case_ = outputs.hidden_states snake_case_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
285
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = params UpperCAmelCase_ : int = np.array(lowercase_ ) UpperCAmelCase_ : Union[str, Any] = np.array([len(lowercase_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , lowercase_ ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def UpperCamelCase__ ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.params.max_model_input_size UpperCAmelCase_ : str = self.lengths > max_len logger.info(F"""Splitting {sum(lowercase_ )} too long sequences.""" ) def divide_chunks(lowercase_ , lowercase_ ): return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )] UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : List[Any] = [] if self.params.mlm: UpperCAmelCase_ , UpperCAmelCase_ : str = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: UpperCAmelCase_ , UpperCAmelCase_ : int = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: UpperCAmelCase_ : Optional[int] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: UpperCAmelCase_ : str = np.insert(lowercase_ , 0 , lowercase_ ) if sub_s[-1] != sep_id: UpperCAmelCase_ : List[str] = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ ) assert len(lowercase_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowercase_ ) new_tok_ids.extend(lowercase_ ) new_lengths.extend([len(lowercase_ ) for l in sub_seqs] ) UpperCAmelCase_ : List[str] = np.array(lowercase_ ) UpperCAmelCase_ : List[str] = np.array(lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = len(self ) UpperCAmelCase_ : Tuple = self.lengths > 11 UpperCAmelCase_ : Any = self.token_ids[indices] UpperCAmelCase_ : List[Any] = self.lengths[indices] UpperCAmelCase_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCamelCase__ ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: UpperCAmelCase_ : List[str] = self.params.special_tok_ids["unk_token"] UpperCAmelCase_ : Optional[Any] = len(self ) UpperCAmelCase_ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) UpperCAmelCase_ : List[Any] = (unk_occs / self.lengths) < 0.5 UpperCAmelCase_ : Tuple = self.token_ids[indices] UpperCAmelCase_ : str = self.lengths[indices] UpperCAmelCase_ : int = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCamelCase__ ( self ): """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[int] = [t[0] for t in batch] UpperCAmelCase_ : Tuple = [t[1] for t in batch] assert len(lowercase_ ) == len(lowercase_ ) # Max for paddings UpperCAmelCase_ : int = max(lowercase_ ) # Pad token ids if self.params.mlm: UpperCAmelCase_ : Optional[Any] = self.params.special_tok_ids["pad_token"] else: UpperCAmelCase_ : int = self.params.special_tok_ids["unk_token"] UpperCAmelCase_ : int = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids] assert len(tk_ ) == len(lowercase_ ) assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ ) UpperCAmelCase_ : List[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_) UpperCAmelCase_ : str = torch.tensor(lowercase_ ) # (bs) return tk_t, lg_t
61
class lowercase : def __init__( self , snake_case , snake_case , snake_case ): snake_case_ = name snake_case_ = value snake_case_ = weight def __repr__( self ): return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def a ( self ): return self.value def a ( self ): return self.name def a ( self ): return self.weight def a ( self ): return self.value / self.weight def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) snake_case_ = [] snake_case_ , snake_case_ = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def __lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
285
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _A = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
62
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = {} snake_case_ = tokenizer(example['content'] , truncation=UpperCamelCase__ )['input_ids'] snake_case_ = len(example['content'] ) / len(output['input_ids'] ) return output _UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments) _UpperCAmelCase : List[Any] = parser.parse_args() if args.num_workers is None: _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count() _UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir) _UpperCAmelCase : Optional[int] = time.time() _UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Union[str, Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') _UpperCAmelCase : Dict = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
285
0
'''simple docstring''' from ....utils import logging lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" def __init__( self : Tuple , __a : int , __a : Any=None , __a : Optional[int]=20_48 ): _a = config.__dict__ _a = modal_hidden_size if num_labels: _a = num_labels
63
def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError('Length must be a positive.' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "roc_bert" def __init__( self: Tuple, a_: Any=30_522, a_: Any=768, a_: int=12, a_: str=12, a_: Tuple=3_072, a_: List[str]="gelu", a_: Union[str, Any]=0.1, a_: str=0.1, a_: Optional[Any]=512, a_: str=2, a_: Any=0.02, a_: Any=1E-12, a_: Optional[Any]=True, a_: int=0, a_: str="absolute", a_: Any=None, a_: List[Any]=True, a_: Optional[Any]=True, a_: str=768, a_: Union[str, Any]=910, a_: Tuple=512, a_: List[str]=24_858, a_: Optional[int]=True, **a_: List[str], ): '''simple docstring''' _snake_case : str = vocab_size _snake_case : Union[str, Any] = max_position_embeddings _snake_case : str = hidden_size _snake_case : Optional[int] = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : Dict = intermediate_size _snake_case : Optional[Any] = hidden_act _snake_case : Dict = hidden_dropout_prob _snake_case : Optional[Any] = attention_probs_dropout_prob _snake_case : Dict = initializer_range _snake_case : Union[str, Any] = type_vocab_size _snake_case : List[str] = layer_norm_eps _snake_case : List[str] = use_cache _snake_case : List[Any] = enable_pronunciation _snake_case : Union[str, Any] = enable_shape _snake_case : Optional[int] = pronunciation_embed_dim _snake_case : int = pronunciation_vocab_size _snake_case : int = shape_embed_dim _snake_case : Dict = shape_vocab_size _snake_case : Optional[int] = concat_input _snake_case : Tuple = position_embedding_type _snake_case : str = classifier_dropout super().__init__(pad_token_id=a_, **a_ )
64
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class lowercase ( unittest.TestCase ): def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) snake_case_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], 'do_convert_rgb': True, } snake_case_ = os.path.join(self.tmpdirname , snake_case ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(snake_case , snake_case ) def a ( self , **snake_case ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self , **snake_case ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): shutil.rmtree(self.tmpdirname ) def a ( self ): snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def a ( self ): snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = self.get_image_processor() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case ) snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case ) self.assertIsInstance(processor_fast.tokenizer , snake_case ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case ) self.assertIsInstance(processor_fast.image_processor , snake_case ) def a ( self ): snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) snake_case_ = self.get_image_processor(do_normalize=snake_case ) snake_case_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = self.prepare_image_inputs() snake_case_ = image_processor(snake_case , return_tensors='np' ) snake_case_ = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = processor(text=snake_case ) snake_case_ = tokenizer(snake_case ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ = processor.batch_decode(snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self ): snake_case_ = self.get_image_processor() snake_case_ = self.get_tokenizer() snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case ) snake_case_ = 'Alexandra,T-shirt的价格是15便士。' snake_case_ = self.prepare_image_inputs() snake_case_ = processor(text=snake_case , images=snake_case ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
285
0
import argparse import logging import pickle from collections import Counter logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) UpperCamelCase__ = logging.getLogger(__name__) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser( description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)' ) parser.add_argument( '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.' ) parser.add_argument( '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.' ) parser.add_argument('--vocab_size', default=3_0_5_2_2, type=int) UpperCamelCase__ = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, 'rb') as fp: UpperCamelCase__ = pickle.load(fp) logger.info('Counting occurrences for MLM.') UpperCamelCase__ = Counter() for tk_ids in data: counter.update(tk_ids) UpperCamelCase__ = [0] * args.vocab_size for k, v in counter.items(): UpperCamelCase__ = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, 'wb') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
65
from abc import ABC, abstractmethod from argparse import ArgumentParser class lowercase ( lowercase_ ): @staticmethod @abstractmethod def a ( snake_case ): raise NotImplementedError() @abstractmethod def a ( self ): raise NotImplementedError()
285
0
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __a = logging.get_logger(__name__) __a = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def A_ ( _lowercase ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: snake_case_ :Optional[int] = model_type_to_module_name(_lowercase ) snake_case_ :Tuple = importlib.import_module(f""".{module_name}""", """transformers.models""" ) try: return getattr(_lowercase, _lowercase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(_lowercase, """__name__""", _lowercase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. snake_case_ :Optional[int] = importlib.import_module("""transformers""" ) if hasattr(_lowercase, _lowercase ): return getattr(_lowercase, _lowercase ) return None def A_ ( _lowercase, _lowercase = None, _lowercase = False, _lowercase = False, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = False, **_lowercase, ): '''simple docstring''' snake_case_ :Union[str, Any] = get_file_from_repo( _lowercase, _lowercase, cache_dir=_lowercase, force_download=_lowercase, resume_download=_lowercase, proxies=_lowercase, use_auth_token=_lowercase, revision=_lowercase, local_files_only=_lowercase, ) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(_lowercase, encoding="""utf-8""" ) as reader: return json.load(_lowercase ) class lowerCamelCase : '''simple docstring''' def __init__( self: List[str] ) -> Tuple: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(snake_case ) def lowerCAmelCase_ ( cls: List[Any] , snake_case: List[Any] , **snake_case: Any ) -> List[Any]: snake_case_ :Optional[int] = kwargs.pop("""config""" , snake_case ) snake_case_ :Optional[int] = kwargs.pop("""trust_remote_code""" , snake_case ) snake_case_ :str = True snake_case_, snake_case_ :int = FeatureExtractionMixin.get_feature_extractor_dict(snake_case , **snake_case ) snake_case_ :List[str] = config_dict.get("""feature_extractor_type""" , snake_case ) snake_case_ :List[Any] = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): snake_case_ :str = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(snake_case , snake_case ): snake_case_ :Union[str, Any] = AutoConfig.from_pretrained(snake_case , **snake_case ) # It could be in `config.feature_extractor_type`` snake_case_ :Dict = getattr(snake_case , """feature_extractor_type""" , snake_case ) if hasattr(snake_case , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: snake_case_ :str = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: snake_case_ :str = feature_extractor_class_from_name(snake_case ) snake_case_ :Any = feature_extractor_auto_map is not None snake_case_ :int = feature_extractor_class is not None or type(snake_case ) in FEATURE_EXTRACTOR_MAPPING snake_case_ :Optional[int] = resolve_trust_remote_code( snake_case , snake_case , snake_case , snake_case ) if has_remote_code and trust_remote_code: snake_case_ :str = get_class_from_dynamic_module( snake_case , snake_case , **snake_case ) snake_case_ :Union[str, Any] = kwargs.pop("""code_revision""" , snake_case ) if os.path.isdir(snake_case ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(snake_case , **snake_case ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(snake_case , **snake_case ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(snake_case ) in FEATURE_EXTRACTOR_MAPPING: snake_case_ :Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(snake_case )] return feature_extractor_class.from_dict(snake_case , **snake_case ) raise ValueError( f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def lowerCAmelCase_ ( snake_case: List[Any] , snake_case: int ) -> Optional[Any]: FEATURE_EXTRACTOR_MAPPING.register(snake_case , snake_case )
66
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
285
0
'''simple docstring''' from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class a__ ( UpperCAmelCase__ ): lowerCamelCase : Dict =CustomTokenizer pass
67
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCAmelCase : Optional[int] = 5_0000 _UpperCAmelCase : Dict = 5000 _UpperCAmelCase , _UpperCAmelCase : Optional[int] = os.path.split(__file__) _UpperCAmelCase : List[str] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(UpperCamelCase__ ): snake_case_ = dataset[i] @get_duration def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' with dataset.formatted_as(type=UpperCamelCase__ ): for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = dataset[i : i + batch_size] def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = {'num examples': SPEED_TEST_N_EXAMPLES} snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] snake_case_ = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) snake_case_ = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) snake_case_ = generate_example_dataset( os.path.join(UpperCamelCase__ , 'dataset.arrow' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func(UpperCamelCase__ , **UpperCamelCase__ ) print('shuffling dataset' ) snake_case_ = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(UpperCamelCase__ ) ) snake_case_ = func( UpperCamelCase__ , **UpperCamelCase__ ) with open(UpperCamelCase__ , 'wb' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
285
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( snake_case , snake_case , snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = StableDiffusionInstructPixaPixPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A__ = PNDMScheduler(skip_prk_steps=lowercase ) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A__ = CLIPTextModel(lowercase ) A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A__ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCamelCase ( self , lowercase , lowercase=0 ) -> int: '''simple docstring''' A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] A__ = Image.fromarray(np.uinta(lowercase ) ).convert("RGB" ) if str(lowercase ).startswith("mps" ): A__ = torch.manual_seed(lowercase ) else: A__ = torch.Generator(device=lowercase ).manual_seed(lowercase ) A__ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = StableDiffusionInstructPixaPixPipeline(**lowercase ) A__ = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A__ = self.get_dummy_inputs(lowercase ) A__ = sd_pipe(**lowercase ).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> Any: '''simple docstring''' A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = StableDiffusionInstructPixaPixPipeline(**lowercase ) A__ = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A__ = self.get_dummy_inputs(lowercase ) A__ = "french fries" A__ = sd_pipe(**lowercase , negative_prompt=lowercase ) A__ = output.images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = StableDiffusionInstructPixaPixPipeline(**lowercase ) A__ = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A__ = self.get_dummy_inputs(lowercase ) A__ = [inputs["prompt"]] * 2 A__ = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 A__ = torch.from_numpy(lowercase ).unsqueeze(0 ).to(lowercase ) A__ = image / 2 + 0.5 A__ = image.permute(0 , 3 , 1 , 2 ) A__ = image.repeat(2 , 1 , 1 , 1 ) A__ = sd_pipe(**lowercase ).images A__ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) A__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = "cpu" # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" ) A__ = StableDiffusionInstructPixaPixPipeline(**lowercase ) A__ = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A__ = self.get_dummy_inputs(lowercase ) A__ = sd_pipe(**lowercase ).images A__ = image[0, -3:, -3:, -1] A__ = [round(lowercase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(lowercase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) A__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_dummy_components() A__ = StableDiffusionInstructPixaPixPipeline(**lowercase ) A__ = VaeImageProcessor(do_resize=lowercase , do_normalize=lowercase ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A__ = pipe(**self.get_dummy_inputs_by_type(lowercase , input_image_type="pt" ) )[0] A__ = components["vae"] A__ = self.get_dummy_inputs_by_type(lowercase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A__ = vae.encode(inputs[image_param] ).latent_dist.mode() A__ = pipe(**lowercase )[0] A__ = np.abs(out - out_latents_inputs ).max() self.assertLess(lowercase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self , lowercase=0 ) -> List[str]: '''simple docstring''' A__ = torch.manual_seed(lowercase ) A__ = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) A__ = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A__ = self.get_inputs() A__ = pipe(**lowercase ).images A__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=lowercase ) A__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A__ = self.get_inputs() A__ = pipe(**lowercase ).images A__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=lowercase ) A__ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A__ = self.get_inputs() A__ = pipe(**lowercase ).images A__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = 0 def callback_fn(lowercase , lowercase , lowercase ) -> None: A__ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A__ = latents[0, -3:, -3:, -1] A__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: A__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A__ = latents[0, -3:, -3:, -1] A__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 A__ = False A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=lowercase , torch_dtype=torch.floataa ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A__ = self.get_inputs() pipe(**lowercase , callback=lowercase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=lowercase , torch_dtype=torch.floataa ) A__ = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A__ = self.get_inputs() A__ = pipe(**lowercase ) A__ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A__ = inputs["image"].resize((504, 504) ) A__ = "timbrooks/instruct-pix2pix" A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowercase , safety_checker=lowercase , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A__ = pipe(**lowercase ) A__ = output.images[0] A__ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) A__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
68
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: snake_case_ = mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: snake_case_ = max( mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , mf_knapsack(i - 1 , UpperCamelCase__ , UpperCamelCase__ , j - wt[i - 1] ) + val[i - 1] , ) snake_case_ = val return f[i][j] def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: snake_case_ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: snake_case_ = dp[i - 1][w_] return dp[n][w_], dp def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if not (isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) snake_case_ = len(UpperCamelCase__ ) if num_items != len(UpperCamelCase__ ): snake_case_ = ( 'The number of weights must be the same as the number of values.\n' F'''But got {num_items} weights and {len(UpperCamelCase__ )} values''' ) raise ValueError(UpperCamelCase__ ) for i in range(UpperCamelCase__ ): if not isinstance(wt[i] , UpperCamelCase__ ): snake_case_ = ( 'All weights must be integers but got weight of ' F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase__ ) snake_case_ , snake_case_ = knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = set() _construct_solution(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return optimal_val, example_optional_set def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , UpperCamelCase__ , UpperCamelCase__ ) else: optimal_set.add(UpperCamelCase__ ) _construct_solution(UpperCamelCase__ , UpperCamelCase__ , i - 1 , j - wt[i - 1] , UpperCamelCase__ ) if __name__ == "__main__": _UpperCAmelCase : Tuple = [3, 2, 4, 4] _UpperCAmelCase : Optional[Any] = [4, 3, 2, 3] _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : str = 6 _UpperCAmelCase : Tuple = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _UpperCAmelCase , _UpperCAmelCase : List[Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _UpperCAmelCase , _UpperCAmelCase : Any = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
285
0
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = BartphoTokenizer SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() snake_case_ = ['▁This', '▁is', '▁a', '▁t', 'est'] snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__)))) snake_case_ = {'unk_token': '<unk>'} snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file']) with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n') snake_case_ = BartphoTokenizer(lowerCAmelCase__, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]: kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = 'This is a là test' snake_case_ = 'This is a<unk><unk> test' return input_text, output_text def a_ ( self) -> Tuple: snake_case_ = BartphoTokenizer(lowerCAmelCase__, self.monolingual_vocab_file, **self.special_tokens_map) snake_case_ = 'This is a là test' snake_case_ = '▁This ▁is ▁a ▁l à ▁t est'.split() snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__), lowerCAmelCase__)
69
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Tuple ={'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : str =[ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys A__ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
import os import numpy import onnx def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = os.path.dirname(UpperCamelCase__ ) snake_case_ = os.path.basename(UpperCamelCase__ ) snake_case_ = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCamelCase__ ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
285
0
A_ :Optional[int] = ''' # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git ''' A_ :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] A_ :Optional[Any] = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
71
import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return vector * sigmoid(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
from __future__ import annotations import collections import pprint from pathlib import Path def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return "".join(sorted(UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return word_by_signature[signature(UpperCamelCase__ )] _UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()}) _UpperCAmelCase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
285
0
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a =TypeVar("""T""") class A_ ( Generic[T] ): def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : T): __lowerCamelCase : List[Any] = data __lowerCamelCase : Node[T] | None = None def __str__( self : Any): return F"{self.data}" class A_ ( Generic[T] ): def __init__( self : Optional[Any]): __lowerCamelCase : Node[T] | None = None def __iter__( self : int): __lowerCamelCase : List[str] = self.top while node: yield node.data __lowerCamelCase : Optional[int] = node.next def __str__( self : List[Any]): return "->".join([str(SCREAMING_SNAKE_CASE__) for item in self]) def __len__( self : List[str]): return len(tuple(iter(self))) def lowerCAmelCase ( self : int): return self.top is None def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : T): __lowerCamelCase : Dict = Node(SCREAMING_SNAKE_CASE__) if not self.is_empty(): __lowerCamelCase : int = self.top __lowerCamelCase : Union[str, Any] = node def lowerCAmelCase ( self : Tuple): if self.is_empty(): raise IndexError('pop from empty stack') assert isinstance(self.top ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = self.top __lowerCamelCase : List[str] = self.top.next return pop_node.data def lowerCAmelCase ( self : Union[str, Any]): if self.is_empty(): raise IndexError('peek from empty stack') assert self.top is not None return self.top.data def lowerCAmelCase ( self : int): __lowerCamelCase : Optional[int] = None if __name__ == "__main__": from doctest import testmod testmod()
73
from __future__ import annotations import numpy as np def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ , snake_case_ = np.shape(UpperCamelCase__ ) if rows != columns: snake_case_ = ( '\'table\' has to be of square shaped array but got a ' F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(UpperCamelCase__ ) snake_case_ = np.zeros((rows, columns) ) snake_case_ = np.zeros((rows, columns) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) snake_case_ = (table[i][j] - total) / upper[j][j] snake_case_ = 1 for j in range(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = sum(lower[i][k] * upper[k][j] for k in range(UpperCamelCase__ ) ) snake_case_ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
285
0
"""simple docstring""" def _snake_case ( snake_case__ : int = 10 , snake_case__ : int = 22 ): A = range(1 , snake_case__ ) A = range(1 , snake_case__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a ( self ): snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case ) snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = jax.random.PRNGKey(0 ) snake_case_ = 50 snake_case_ = jax.device_count() snake_case_ = num_samples * [prompt] snake_case_ = num_samples * [init_image] snake_case_ = num_samples * [mask_image] snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case ) # shard inputs and rng snake_case_ = replicate(snake_case ) snake_case_ = jax.random.split(snake_case , jax.device_count() ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = shard(snake_case ) snake_case_ = pipeline( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case ) snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 ) snake_case_ = images[0, 253:256, 253:256, -1] snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
285
0
'''simple docstring''' from __future__ import annotations a_ : Any = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def a_ ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : list[int] , __snake_case : int , __snake_case : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: """simple docstring""" lowerCamelCase_ =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the reference grid lowerCamelCase_ =1 lowerCamelCase_ =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the action grid lowerCamelCase_ =init[0] lowerCamelCase_ =init[1] lowerCamelCase_ =0 lowerCamelCase_ =g + heuristic[x][y] # cost from starting cell to destination cell lowerCamelCase_ =[[f, g, x, y]] lowerCamelCase_ =False # flag that is set when search is complete lowerCamelCase_ =False # flag set if we can't find expand while not found and not resign: if len(__snake_case ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() lowerCamelCase_ =cell.pop() lowerCamelCase_ =next_cell[2] lowerCamelCase_ =next_cell[3] lowerCamelCase_ =next_cell[1] if x == goal[0] and y == goal[1]: lowerCamelCase_ =True else: for i in range(len(__snake_case ) ): # to try out different valid actions lowerCamelCase_ =x + DIRECTIONS[i][0] lowerCamelCase_ =y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__snake_case ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: lowerCamelCase_ =g + cost lowerCamelCase_ =ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) lowerCamelCase_ =1 lowerCamelCase_ =i lowerCamelCase_ =[] lowerCamelCase_ =goal[0] lowerCamelCase_ =goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: lowerCamelCase_ =x - DIRECTIONS[action[x][y]][0] lowerCamelCase_ =y - DIRECTIONS[action[x][y]][1] lowerCamelCase_ =xa lowerCamelCase_ =ya invpath.append([x, y] ) lowerCamelCase_ =[] for i in range(len(__snake_case ) ): path.append(invpath[len(__snake_case ) - 1 - i] ) return path, action if __name__ == "__main__": a_ : Dict = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] a_ : Tuple = [0, 0] # all coordinates are given in format [y,x] a_ : Optional[int] = [len(grid) - 1, len(grid[0]) - 1] a_ : List[Any] = 1 # the cost map which pushes the path closer to the goal a_ : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): a_ : int = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map a_ : int = 99 a_ , a_ : str = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
75
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , 'dataset_info.json' ) ) def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCamelCase__ ) snake_case_ = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) snake_case_ = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , 'README.md' ) )
285
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ = logging.get_logger(__name__) a_ = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowerCamelCase__ ( _a): for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE : str = model_type_to_module_name(_a) SCREAMING_SNAKE_CASE : List[str] = importlib.import_module(f".{module_name}" , "transformers.models") try: return getattr(_a , _a) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_a , "__name__" , _a) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE : Optional[int] = importlib.import_module("transformers") if hasattr(_a , _a): return getattr(_a , _a) return None def lowerCamelCase__ ( _a , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , _a = False , **_a , ): SCREAMING_SNAKE_CASE : Any = get_file_from_repo( _a , _a , cache_dir=_a , force_download=_a , resume_download=_a , proxies=_a , use_auth_token=_a , revision=_a , local_files_only=_a , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead.") return {} with open(_a , encoding="utf-8") as reader: return json.load(_a) class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[int] ) -> Optional[Any]: """simple docstring""" raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(a ) def __UpperCamelCase ( cls : Optional[int] , a : List[str] , **a : List[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("config" , a ) SCREAMING_SNAKE_CASE : Any = kwargs.pop("trust_remote_code" , a ) SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = ImageProcessingMixin.get_image_processor_dict(a , **a ) SCREAMING_SNAKE_CASE : Any = config_dict.get("image_processor_type" , a ) SCREAMING_SNAKE_CASE : Tuple = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : str = config_dict["auto_map"]["AutoImageProcessor"] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: SCREAMING_SNAKE_CASE : Optional[Any] = config_dict.pop("feature_extractor_type" , a ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. Loading" " based on pattern matching with the model's feature extractor configuration." ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict["auto_map"]["AutoFeatureExtractor"] SCREAMING_SNAKE_CASE : List[Any] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(a , a ): SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(a , **a ) # It could be in `config.image_processor_type`` SCREAMING_SNAKE_CASE : Optional[int] = getattr(a , "image_processor_type" , a ) if hasattr(a , "auto_map" ) and "AutoImageProcessor" in config.auto_map: SCREAMING_SNAKE_CASE : Union[str, Any] = config.auto_map["AutoImageProcessor"] if image_processor_class is not None: SCREAMING_SNAKE_CASE : Dict = image_processor_class_from_name(a ) SCREAMING_SNAKE_CASE : Tuple = image_processor_auto_map is not None SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor_class is not None or type(a ) in IMAGE_PROCESSOR_MAPPING SCREAMING_SNAKE_CASE : Optional[int] = resolve_trust_remote_code( a , a , a , a ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE : int = get_class_from_dynamic_module( a , a , **a ) SCREAMING_SNAKE_CASE : Dict = kwargs.pop("code_revision" , a ) if os.path.isdir(a ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(a , **a ) elif image_processor_class is not None: return image_processor_class.from_dict(a , **a ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(a ) in IMAGE_PROCESSOR_MAPPING: SCREAMING_SNAKE_CASE : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(a )] return image_processor_class.from_dict(a , **a ) raise ValueError( F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a " F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following " F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" ) @staticmethod def __UpperCamelCase ( a : Dict , a : List[str] ) -> Optional[Any]: """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(a , a )
76
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast __SCREAMING_SNAKE_CASE : int = BloomTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def a ( self ): super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self , **snake_case ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(snake_case )['input_ids'] self.assertListEqual(snake_case , snake_case ) snake_case_ = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def a ( self , snake_case=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.encode_plus(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) tokenizer_r.encode(snake_case , max_length=snake_case ) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case ) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding' ) snake_case_ = None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' ) # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def a ( self ): snake_case_ = self.get_rust_tokenizer() snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case ) snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data snake_case_ = list(sample_data.values() ) snake_case_ = list(map(tokenizer.encode , snake_case ) ) snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens] self.assertListEqual(snake_case , snake_case ) def a ( self ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
285
0
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() _UpperCamelCase : Optional[int] = logging.get_logger(__name__) _UpperCamelCase : str = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _UpperCamelCase : Optional[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Any = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : str = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[int] = value elif weight_type == "weight_g": lowercase__ : int = value elif weight_type == "weight_v": lowercase__ : Optional[Any] = value elif weight_type == "bias": lowercase__ : int = value else: lowercase__ : Dict = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : str = [] lowercase__ : int = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.feature_extractor lowercase__ : Dict = hf_model.adapter for name, value in fairseq_dict.items(): lowercase__ : int = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Tuple = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Dict = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: lowercase__ : Optional[Any] = True if "*" in mapped_key: lowercase__ : str = name.split(_lowerCAmelCase )[0].split('.' )[-2] lowercase__ : Tuple = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : Tuple = 'weight_g' elif "weight_v" in name: lowercase__ : Any = 'weight_v' elif "bias" in name: lowercase__ : Tuple = 'bias' elif "weight" in name: lowercase__ : List[str] = 'weight' else: lowercase__ : str = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Union[str, Any] = full_name.split('conv_layers.' )[-1] lowercase__ : str = name.split('.' ) lowercase__ : Union[str, Any] = int(items[0] ) lowercase__ : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) lowercase__ : Dict = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ): '''simple docstring''' lowercase__ : Tuple = full_name.split('adaptor.' )[-1] lowercase__ : Dict = name.split('.' ) if items[1].isdigit(): lowercase__ : Dict = int(items[1] ) else: lowercase__ : Union[str, Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.""" lowercase__ : Any = value logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.""" lowercase__ : List[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.""" lowercase__ : Optional[Any] = value logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.""" lowercase__ : Union[str, Any] = value logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" ) elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.""" lowercase__ : int = value logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.""" lowercase__ : Dict = value logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ , lowercase__ : Union[str, Any] = emb.weight.shape lowercase__ : str = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) lowercase__ : Optional[int] = emb.weight.data return lin_layer @torch.no_grad() def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , ): '''simple docstring''' lowercase__ : Optional[int] = WavaVecaConfig.from_pretrained( _lowerCAmelCase , add_adapter=_lowerCAmelCase , adapter_stride=_lowerCAmelCase , adapter_kernel_size=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , output_hidden_size=_lowerCAmelCase , ) lowercase__ : List[str] = MBartConfig.from_pretrained(_lowerCAmelCase ) # load model lowercase__ , lowercase__ , lowercase__ : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) lowercase__ : List[str] = model[0].eval() # load feature extractor lowercase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained(_lowerCAmelCase , use_auth_token=_lowerCAmelCase ) # set weights for wav2vec2 encoder lowercase__ : int = WavaVecaModel(_lowerCAmelCase ) recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) # load decoder weights lowercase__ : List[Any] = MBartForCausalLM(_lowerCAmelCase ) lowercase__ , lowercase__ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) lowercase__ : Optional[int] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) lowercase__ : Dict = False lowercase__ : Any = MBartaaTokenizer(_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) lowercase__ : str = hf_wavavec.config.to_dict() lowercase__ : Dict = tokenizer.pad_token_id lowercase__ : str = tokenizer.bos_token_id lowercase__ : Tuple = tokenizer.eos_token_id lowercase__ : Any = 'mbart50' lowercase__ : Union[str, Any] = 'wav2vec2' lowercase__ : Optional[int] = tokenizer.eos_token_id lowercase__ : Union[str, Any] = 25_0004 lowercase__ : int = tokenizer.eos_token_id lowercase__ : str = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config") _UpperCamelCase : Any = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
77
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCamelCase__ ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=UpperCamelCase__ ) if len(UpperCamelCase__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCamelCase__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc='downloading real regularization images' , total=UpperCamelCase__ ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images['url'] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def __lowerCamelCase ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser('' , add_help=UpperCamelCase__ ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCamelCase__ , type=UpperCamelCase__ ) parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=UpperCamelCase__ ) return parser.parse_args() if __name__ == "__main__": _UpperCAmelCase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
0
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = (DDIMParallelScheduler,) __UpperCamelCase = (("""eta""", 0.0), ("""num_inference_steps""", 50)) def UpperCAmelCase__ ( self :str , **lowercase_ :Tuple ) -> Optional[int]: UpperCAmelCase = { 'num_train_timesteps': 10_00, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowercase_ ) return config def UpperCAmelCase__ ( self :Dict , **lowercase_ :Optional[int] ) -> str: UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase = scheduler_class(**lowercase_ ) UpperCAmelCase , UpperCAmelCase = 10, 0.0 UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for t in scheduler.timesteps: UpperCAmelCase = model(lowercase_ , lowercase_ ) UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCAmelCase__ ( self :str ) -> List[str]: for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCAmelCase__ ( self :str ) -> Union[str, Any]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase = scheduler_class(**lowercase_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def UpperCAmelCase__ ( self :str ) -> str: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCAmelCase__ ( self :str ) -> Dict: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCAmelCase__ ( self :int ) -> Optional[int]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase_ ) def UpperCAmelCase__ ( self :Dict ) -> List[str]: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowercase_ ) def UpperCAmelCase__ ( self :int ) -> str: self.check_over_configs(thresholding=lowercase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , ) def UpperCAmelCase__ ( self :List[Any] ) -> Union[str, Any]: for t in [1, 10, 49]: self.check_over_forward(time_step=lowercase_ ) def UpperCAmelCase__ ( self :str ) -> Union[str, Any]: for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ ) def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]: for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=lowercase_ , eta=lowercase_ ) def UpperCAmelCase__ ( self :List[str] ) -> Union[str, Any]: UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]: UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**lowercase_ ) UpperCAmelCase , UpperCAmelCase = 10, 0.0 scheduler.set_timesteps(lowercase_ ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter UpperCAmelCase = self.dummy_sample_deter + 0.1 UpperCAmelCase = self.dummy_sample_deter - 0.1 UpperCAmelCase = samplea.shape[0] UpperCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCAmelCase = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ ) UpperCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCAmelCase = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ ) UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def UpperCAmelCase__ ( self :str ) -> List[str]: UpperCAmelCase = self.full_loop() UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.22_3967 ) < 1E-3 def UpperCAmelCase__ ( self :str ) -> List[Any]: UpperCAmelCase = self.full_loop(prediction_type='v_prediction' ) UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def UpperCAmelCase__ ( self :Optional[Any] ) -> Any: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def UpperCAmelCase__ ( self :Optional[int] ) -> Any: # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
78
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : Dict = { """nielsr/canine-s""": 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCAmelCase : Tuple = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : Any = 0xE000 _UpperCAmelCase : Dict = 0xE001 _UpperCAmelCase : Optional[int] = 0xE002 _UpperCAmelCase : Tuple = 0xE003 _UpperCAmelCase : Tuple = 0xE004 # Maps special codepoints to human-readable names. _UpperCAmelCase : Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=chr(snake_case ) , snake_case=False , snake_case=2048 , **snake_case , ): snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token super().__init__( bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , model_max_length=snake_case , **snake_case , ) # Creates a mapping for looking up the IDs of special symbols. snake_case_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): snake_case_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. snake_case_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } snake_case_ = UNICODE_VOCAB_SIZE snake_case_ = len(self._special_codepoints ) @property def a ( self ): return self._unicode_vocab_size def a ( self , snake_case ): return list(snake_case ) def a ( self , snake_case ): try: return ord(snake_case ) except TypeError: raise ValueError(F'''invalid token: \'{token}\'''' ) def a ( self , snake_case ): try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(snake_case ) except TypeError: raise ValueError(F'''invalid id: {index}''' ) def a ( self , snake_case ): return "".join(snake_case ) def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def a ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) snake_case_ = [1] + ([0] * len(snake_case )) + [1] if token_ids_a is not None: result += ([0] * len(snake_case )) + [1] return result def a ( self , snake_case , snake_case = None ): snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def a ( self , snake_case , snake_case = None ): return ()
285
0
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _A = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase , cache_dir=__UpperCAmelCase ) _A = [t[-1] for t in os.walk(os.path.join(__UpperCAmelCase , os.listdir(__UpperCAmelCase )[0] , "snapshots" ) )] _A = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 4 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3 assert np.abs(np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1 _A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(__UpperCAmelCase ) == num_samples def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1 def lowerCAmelCase ( self : str ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , ) _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , ) _A = scheduler.create_state() _A = scheduler_state _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1 def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.device_count() _A = num_samples * [prompt] _A = jax.random.split(jax.random.PRNGKey(0 ) , __UpperCAmelCase ) _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , ) _A = replicate(__UpperCAmelCase ) _A = pipeline.prepare_inputs(__UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) _A = images[2, 0, 256, 10:17, 1] # With memory efficient attention _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , use_memory_efficient_attention=__UpperCAmelCase , ) _A = replicate(__UpperCAmelCase ) _A = pipeline.prepare_inputs(__UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _A = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
79
def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] _UpperCAmelCase : Union[str, Any] = generate_large_matrix() _UpperCAmelCase : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' assert all(row == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for row in grid ) assert all(list(UpperCamelCase__ ) == sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ) for col in zip(*UpperCamelCase__ ) ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(UpperCamelCase__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: snake_case_ = (left + right) // 2 snake_case_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: snake_case_ = mid + 1 else: snake_case_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(UpperCamelCase__ ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(grid[0] ) for i in range(len(UpperCamelCase__ ) ): snake_case_ = find_negative_index(grid[i][:bound] ) total += bound return (len(UpperCamelCase__ ) * len(grid[0] )) - total def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' snake_case_ = 0 for row in grid: for i, number in enumerate(UpperCamelCase__ ): if number < 0: total += len(UpperCamelCase__ ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('Running benchmarks' ) snake_case_ = ( 'from __main__ import count_negatives_binary_search, ' 'count_negatives_brute_force, count_negatives_brute_force_with_break, grid' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): snake_case_ = timeit(F'''{func}(grid=grid)''' , setup=UpperCamelCase__ , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
285
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Any = logging.get_logger(__name__) a__ : str = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class lowercase_ ( a__ ): __UpperCAmelCase = 'lilt' def __init__( self , a=3_05_22 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=2 , a=0.02 , a=1e-12 , a=0 , a="absolute" , a=None , a=4 , a=10_24 , **a , ): super().__init__(pad_token_id=a , **a ) UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = hidden_act UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = position_embedding_type UpperCamelCase__ = classifier_dropout UpperCamelCase__ = channel_shrink_ratio UpperCamelCase__ = max_ad_position_embeddings
80
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : Dict = logging.get_logger(__name__) class lowercase : def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ): if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self , snake_case ): if not isinstance(snake_case , snake_case ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a ( self , snake_case , snake_case = False ): if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) snake_case_ = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: snake_case_ = text def a ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case_ = None def a ( self , snake_case ): self.generated_responses.append(snake_case ) def a ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): snake_case_ = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( lowercase_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. ''' , ) class lowercase ( lowercase_ ): def __init__( self , *snake_case , **snake_case ): super().__init__(*snake_case , **snake_case ) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ): snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case ) return preprocess_params, forward_params, postprocess_params def __call__( self , snake_case , snake_case=0 , **snake_case ): snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case ) if isinstance(snake_case , snake_case ) and len(snake_case ) == 1: return outputs[0] return outputs def a ( self , snake_case , snake_case=32 ): if not isinstance(snake_case , snake_case ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(snake_case ) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case_ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a ( self , snake_case , snake_case=10 , **snake_case ): snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length ) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation' ) snake_case_ = max_length snake_case_ = self.model.generate(**snake_case , **snake_case ) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a ( self , snake_case , snake_case=True ): snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(snake_case ) return conversation def a ( self , snake_case ): snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) if len(snake_case ) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
285
0