import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
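# For context: a minimal sketch of the reverse-diffusion loop the tests above
# exercise. This snippet is illustrative and not part of the test file; the
# `noise_predictor` below is a placeholder for a trained UNet.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)
noise_predictor = lambda x, t: torch.zeros_like(x)  # stand-in for a trained model

for t in scheduler.timesteps:
    residual = noise_predictor(sample, t)
    # each step predicts x_{t-1} from the current sample and the noise estimate
    sample = scheduler.step(residual, t, sample).prev_sample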
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since TFViTMAEForPreTraining applies random masking; we fix the
    # noise vector so the generated masks are reproducible during the test
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)

            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
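# Illustrative sketch (not part of the test file; config values are deliberately
# tiny): passing an explicit `noise` tensor is what makes ViTMAE's random patch
# masking reproducible, which is the trick every overridden test above relies on.
import numpy as np
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, ViTMAEConfig

tiny_config = ViTMAEConfig(
    image_size=32,
    patch_size=8,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    decoder_hidden_size=32,
    decoder_num_hidden_layers=1,
    decoder_num_attention_heads=4,
    decoder_intermediate_size=64,
)
tiny_model = TFViTMAEForPreTraining(tiny_config)

pixel_values = tf.random.uniform((1, 3, 32, 32))
fixed_noise = np.random.uniform(size=(1, (32 // 8) ** 2))  # one value per patch

# same noise -> same mask -> identical logits on both calls
out_a = tiny_model(pixel_values, noise=fixed_noise)
out_b = tiny_model(pixel_values, noise=fixed_noise)
assert np.allclose(out_a.logits.numpy(), out_b.logits.numpy())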
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
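# Why the gradients above are correct (a short verification helper; not part of
# the original solution). Differentiating the ellipse 4x^2 + y^2 = 100 gives the
# tangent slope -4x/y, so the normal slope is n = y / (4x). Reflecting a ray of
# slope m = tan(alpha) about a normal at angle theta = atan(n) sends its angle to
# 2*theta - alpha; with sin(2t) = 2n/(1 + n^2) and cos(2t) = (1 - n^2)/(1 + n^2),
# the tangent subtraction identity yields
#     m' = (sin 2t - m cos 2t) / (cos 2t + m sin 2t),
# which is exactly `outgoing_gradient`.
from math import atan, isclose, tan


def reflect_via_angles(m: float, n: float) -> float:
    # reflect slope m about a line of slope n through the same point, via angles
    return tan(2 * atan(n) - atan(m))


m, n = 0.3, 1.7
s2 = 2 * n / (1 + n * n)
c2 = (1 - n * n) / (1 + n * n)
assert isclose(reflect_via_angles(m, n), (s2 - c2 * m) / (c2 + s2 * m))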
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
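# A small usage sketch (import path assumed from an installed transformers; not
# part of this module): a plain dict passed as `esmfold_config` is promoted to an
# `EsmFoldConfig`, and `to_dict()` round-trips the nesting into JSON-friendly dicts.
from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig

config = EsmConfig(
    vocab_size=33,
    is_folding_model=True,
    esmfold_config={"trunk": {"num_blocks": 4}},
)
assert isinstance(config.esmfold_config, EsmFoldConfig)
assert config.to_dict()["esmfold_config"]["trunk"]["num_blocks"] == 4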
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
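# Quick sanity sketch (not part of this module) of the attribute map and the
# two-stage validation above.
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
assert config.num_attention_heads == config.encoder_attention_heads
assert config.hidden_size == config.d_model

try:
    DeformableDetrConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # If two_stage is True, with_box_refine must be True.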
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[tuple[int, int, int]]]] = {}


def next_term(a_i, k, i, n):
    # a_i is a little-endian digit array representing a(i) = b * 10^k + c;
    # ds_b is digitsum(b), c the low-order part
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to the little-endian digit array, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
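# A naive reference implementation (not part of the original solution) of the
# same sequence -- a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)) -- useful for
# sanity-checking `solution` on small n before trusting the memoized jumps.
def solution_brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a


assert solution_brute_force(6) == 23  # 1, 2, 4, 8, 16, 23, ...
assert solution(10) == solution_brute_force(10) == 62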
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
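# Worked example of the padding rule in `pad` above (pure Python, no deps): each
# spatial extent grows to the next multiple of `size`, and an extent that is
# already a multiple still gains one full extra window because of the `// size + 1`.
def padded_extent(old: int, size: int) -> int:
    return (old // size + 1) * size


assert padded_extent(13, 8) == 16  # 13 -> 16
assert padded_extent(17, 8) == 24  # 17 -> 24
assert padded_extent(16, 8) == 24  # already a multiple, still padded up one window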
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")  # same call as above, but via keyword argument
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
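# Illustrative record shape (assumed from the DPR biencoder data format; values
# below are made up). Each record contributes one question line to the evaluation
# set and one tab-separated line of positive-context titles to the gold file.
example_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
}
# evaluation_set line:  who wrote the declaration of independence
# gold_data_path line:  Thomas Jefferson\tDeclaration of Independence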
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = '''src/diffusers'''
lowerCAmelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase__ = spec.loader.load_module()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return line.startswith(lowercase__ ) or len(lowercase__ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , lowercase__ ) is not None
def find_code_in_diffusers(object_name):
"""Find and return the source code of `object_name` inside the diffusers repo."""
parts = object_name.split("." )
i = 0
# First let's find the module where our object lives.
module = parts[i]
while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
i += 1
if i < len(parts ):
module = os.path.join(module , parts[i] )
if i >= len(parts ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lines = f.readlines()
# Now let's find the class / func in the code!
indent = ""
line_index = 0
for name in parts[i + 1 :]:
while (
line_index < len(lines ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += "    "
line_index += 1
if line_index >= len(lines ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
start_index = line_index
while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
code_lines = lines[start_index:line_index]
return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code):
"""Return the indentation of the first non-empty line in `code`."""
lines = code.split("\n" )
idx = 0
while idx < len(lines ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lines ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def blackify(code):
"""Apply black formatting to `code`, wrapping it in a dummy class if it is indented."""
has_indent = len(get_indent(code ) ) > 0
if has_indent:
code = f"""class Bla:\n{code}"""
mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
result = black.format_str(code , mode=mode )
result , _ = style_docstrings_in_code(result )
return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
"""Check whether code commented as a copy in `filename` still matches the original; return the diffs, fixing the file in place when `overwrite=True`."""
with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
lines = f.readlines()
diffs = []
line_index = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lines ):
search = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
indent , object_name , replace_pattern = search.groups()
theoretical_code = find_code_in_diffusers(object_name )
theoretical_indent = get_indent(theoretical_code )
start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
indent = theoretical_indent
line_index = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
should_continue = True
while line_index < len(lines ) and should_continue:
line_index += 1
if line_index >= len(lines ):
break
line = lines[line_index]
should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
observed_code_lines = lines[start_index:line_index]
observed_code = "".join(observed_code_lines )
# Remove any nested `Copied from` comments to avoid circular copies
theoretical_code_lines = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
theoretical_code = "\n".join(theoretical_code_lines )
# Before comparing, use the `replace_pattern` on the original code.
if len(replace_pattern ) > 0:
patterns = replace_pattern.replace("with" , "" ).split("," )
patterns = [_re_replace_pattern.search(p ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
obj1 , obj2 , option = pattern.groups()
theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
if option.strip() == "all-casing":
theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name , start_index] )
if overwrite:
lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
line_index = start_index + 1
if overwrite and len(diffs ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(filename , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines )
return diffs
def check_copies(overwrite=False):
"""Run `is_copy_consistent` on every Python file in the repo and raise on mismatches."""
all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
diffs = []
for filename in all_files:
new_diffs = is_copy_consistent(filename , overwrite )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(diffs ) > 0:
diff = "\n".join(diffs )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
args = parser.parse_args()
check_copies(args.fix_and_overwrite)
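# An illustrative sketch of the comment convention the regexes above match
# (the class and module names here are hypothetical, chosen only to show the format):
#
# # Copied from diffusers.models.attention.Attention with Attention->CrossAttention
# class CrossAttention(nn.Module):
#     ...
#
# For such a block, `is_copy_consistent` fetches the source of
# `diffusers.models.attention.Attention` via `find_code_in_diffusers`, applies the
# `Attention->CrossAttention` replacement, runs `blackify`, and compares the result
# against the code observed below the comment.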
| 702 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def parse_args():
"""Parse the command line arguments for TFRecord sharding."""
parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=int , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
args = parser.parse_args()
return args
def tokenize_function(tokenizer):
"""Return a closure that tokenizes the `text` column of a batch."""
def fn(examples):
return tokenizer(examples["text"] )
return fn
def get_serialized_examples(tokenized_data):
"""Serialize tokenized samples into `tf.train.Example` byte strings."""
records = []
for i in range(len(tokenized_data["input_ids"] ) ):
feature = {
"input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
}
features = tf.train.Features(feature=feature )
example = tf.train.Example(features=features )
record_bytes = example.SerializeToString()
records.append(record_bytes )
return records
def main(args):
"""Tokenize, group and shard the dataset into TFRecord files."""
dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
max_samples = min(len(dataset ) , args.limit )
dataset = dataset.select(range(max_samples ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
split_dir = os.path.join(args.output_dir , args.split )
if not os.path.exists(split_dir ):
os.makedirs(split_dir )
else:
split_dir = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
tokenize_fn = tokenize_function(tokenizer )
dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
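# As a concrete sketch: with max_length=4, a batch whose concatenated
# "input_ids" are [t1, t2, ..., t10] yields two fixed-length samples,
# [t1, t2, t3, t4] and [t5, t6, t7, t8]; the remainder [t9, t10] is dropped.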
def group_texts(examples ):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
total_length = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
result = {
k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
shard_count = 0
total_records = 0
for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
records_containing = len(dataset_snapshot["input_ids"] )
filename = os.path.join(split_dir , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
serialized_examples = get_serialized_examples(dataset_snapshot )
with tf.io.TFRecordWriter(filename ) as out_file:
for i in range(len(serialized_examples ) ):
example = serialized_examples[i]
out_file.write(example )
print("Wrote file {} containing {} records".format(filename , records_containing ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=f )
if __name__ == "__main__":
args = parse_args()
main(args)
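# A minimal sketch of reading a shard back (illustrative only; the file name and
# the fixed length of 512 assume the default --output_dir, --split and --max_length):
#
# feature_description = {
#     "input_ids": tf.io.FixedLenFeature([512], tf.int64),
#     "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
# }
# raw_ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
# parsed_ds = raw_ds.map(lambda record: tf.io.parse_single_example(record, feature_description))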
| 81 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
"""Return the number of positions at which two equal-length strings differ."""
if len(string1 ) != len(string2 ):
raise ValueError("String lengths must match!" )
count = 0
for char1, char2 in zip(string1 , string2 ):
if char1 != char2:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
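# A quick usage sketch (the strings are illustrative, not from the original file):
# "karolin" and "kathrin" differ at positions 2, 3 and 4.
print(hamming_distance("karolin" , "kathrin" ) )  # -> 3
print(hamming_distance("0000" , "1111" ) )  # -> 4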
| 703 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_labels = num_labels
self.initializer_range = initializer_range
self.out_features = out_features
self.out_indices = out_indices
self.scope = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 0 |
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
"""Multiply a and b with the binary (Russian peasant) shift-and-add method."""
res = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
"""Compute (a * b) % modulus without ever forming the full product."""
res = 0
while b > 0:
if b & 1:
res = ((res % modulus) + (a % modulus)) % modulus
a += a
b >>= 1
return res
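# A worked trace of binary_multiply(13, 9), written out for illustration:
# b=9 (0b1001): low bit set -> res = 0 + 13 = 13; a -> 26, b -> 4
# b=4 (0b100): low bit clear -> a -> 52, b -> 2
# b=2 (0b10): low bit clear -> a -> 104, b -> 1
# b=1 (0b1): low bit set -> res = 13 + 104 = 117; a -> 208, b -> 0
# 13 * 9 = 117, matching res; binary_mod_multiply(13, 9, 100) gives 117 % 100 = 17.
if __name__ == "__main__":
print(binary_multiply(13 , 9 ) )  # -> 117
print(binary_mod_multiply(13 , 9 , 100 ) )  # -> 17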
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus ):
"""Integration test: fine-tune a tiny bert2bert EncoderDecoder model with Seq2SeqTrainer."""
@slow
@require_torch
def test_finetune_bert2bert( self ):
bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
bert2bert.config.eos_token_id = tokenizer.sep_token_id
bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
bert2bert.config.max_length = 128
train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
train_dataset = train_dataset.select(range(32 ) )
val_dataset = val_dataset.select(range(16 ) )
batch_size = 4
def _map_to_encoder_decoder_inputs(batch ):
# Tokenizer will automatically set [BOS] <text> [EOS]
inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
batch["input_ids"] = inputs.input_ids
batch["attention_mask"] = inputs.attention_mask
batch["decoder_input_ids"] = outputs.input_ids
batch["labels"] = outputs.input_ids.copy()
batch["labels"] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
batch["decoder_attention_mask"] = outputs.attention_mask
assert all(len(x ) == 512 for x in inputs.input_ids )
assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(pred ):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
train_dataset = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
val_dataset = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
output_dir = self.get_auto_remove_tmp_dir()
training_args = Seq2SeqTrainingArguments(
output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
trainer = Seq2SeqTrainer(
model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
| 81 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
"""Return the speed of sound (m/s) in a fluid from its density and bulk modulus."""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
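# Illustrative check with rough values for water at room temperature (the
# numbers are assumptions for the example, not from the original file):
# density ~998 kg/m^3, bulk modulus ~2.15e9 Pa -> roughly 1468 m/s.
print(speed_of_sound_in_a_fluid(density=998 , bulk_modulus=2.15e9 ) )  # ~1467.8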
| 705 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
"""Build a YolosConfig matching the checkpoint variant `yolos_name`."""
config = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
config.image_size = [800, 1_333]
config.use_mid_position_embeddings = False
elif yolos_name == "yolos_s_dWr":
config.hidden_size = 330
config.num_hidden_layers = 14
config.num_attention_heads = 6
config.intermediate_size = 1_320
elif "yolos_s" in yolos_name:
config.hidden_size = 384
config.intermediate_size = 1_536
config.num_hidden_layers = 12
config.num_attention_heads = 6
elif "yolos_b" in yolos_name:
config.image_size = [800, 1_344]
config.num_labels = 91
repo_id = "huggingface/label-files"
filename = "coco-detection-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def read_in_q_k_v(state_dict, config, base_model=False):
"""Split each fused qkv projection into separate query/key/value entries."""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def convert_state_dict(orig_state_dict, model):
"""Rename timm-style keys and split fused qkv matrices into query/key/value."""
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split("." )
layer_num = int(key_split[2] )
dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
dim : dim * 2, :
]
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
else:
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def prepare_img():
"""Download the standard COCO test image used for verification."""
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
"""Copy/paste/tweak a YOLOS checkpoint into the HuggingFace format."""
config = get_yolos_config(yolos_name )
# load original state_dict
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
# load 🤗 model
model = YolosForObjectDetection(config )
model.eval()
new_state_dict = convert_state_dict(state_dict , model )
model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by YolosImageProcessor
size = 800 if yolos_name != "yolos_ti" else 512
image_processor = YolosImageProcessor(format="coco_detection" , size=size )
encoding = image_processor(images=prepare_img() , return_tensors="pt" )
outputs = model(**encoding )
logits , pred_boxes = outputs.logits, outputs.pred_boxes
expected_slice_logits , expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
expected_slice_logits = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
expected_slice_boxes = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
expected_slice_logits = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
expected_slice_boxes = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
expected_slice_logits = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
expected_slice_boxes = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
expected_slice_logits = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
expected_slice_boxes = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
expected_slice_logits = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
expected_slice_boxes = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model_mapping = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
model_name = model_mapping[yolos_name]
image_processor.push_to_hub(model_name , organization="hustvl" )
model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
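# A minimal sketch of the fused-QKV split done in `convert_state_dict` above
# (hidden_size=4 is an illustrative assumption, so qkv.weight has shape (12, 4)):
#
# import torch
# qkv_weight = torch.randn(12, 4)
# query = qkv_weight[:4, :]    # first hidden_size rows
# key = qkv_weight[4:8, :]     # middle hidden_size rows
# value = qkv_weight[-4:, :]   # last hidden_size rows
# assert torch.equal(torch.cat([query, key, value]), qkv_weight)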
| 81 | 0 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric ):
"""Exact match rate between predictions and references."""
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
references = np.array([re.sub(s , "" , x ) for x in references] )
else:
predictions = np.asarray(predictions )
references = np.asarray(references )
if ignore_case:
predictions = np.char.lower(predictions )
references = np.char.lower(references )
if ignore_punctuation:
repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
predictions = np.char.translate(predictions , table=repl_table )
references = np.char.translate(references , table=repl_table )
if ignore_numbers:
repl_table = string.digits.maketrans("" , "" , string.digits )
predictions = np.char.translate(predictions , table=repl_table )
references = np.char.translate(references , table=repl_table )
score_list = predictions == references
return {"exact_match": np.mean(score_list ) * 100}
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
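# A short usage note on the pattern above (the module path is assumed to be
# transformers/models/mgp_str/__init__.py): importing the package only registers
# `_import_structure`; the first attribute access, e.g.
# `from transformers.models.mgp_str import MgpstrConfig`, makes `_LazyModule`
# import the real submodule, so heavy dependencies like torch load on demand.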
| 81 | 0 |
import string
def atbash_slow(sequence: str) -> str:
"""Encode/decode `sequence` with the Atbash cipher, character by character."""
output = ""
for i in sequence:
extract = ord(i )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def atbash(sequence: str) -> str:
"""Encode/decode `sequence` with the Atbash cipher using a reversed alphabet lookup."""
letters = string.ascii_letters
letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def benchmark() -> None:
"""Time both implementations on `string.printable`."""
from timeit import timeit
print("Running performance benchmarks..." )
setup = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds""" )
print(f"""> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
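# Atbash is an involution, so encoding twice restores the input; a quick
# self-check added for illustration:
assert atbash(atbash("The quick brown fox." ) ) == "The quick brown fox."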
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Doolittle LU decomposition: factor a square matrix into lower @ upper."""
rows , columns = np.shape(table )
if rows != columns:
msg = (
"'table' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(msg )
lower = np.zeros((rows, columns) )
upper = np.zeros((rows, columns) )
for i in range(columns ):
for j in range(i ):
total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lower[i][j] = (table[i][j] - total) / upper[j][j]
lower[i][i] = 1
for j in range(i , columns ):
total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
upper[i][j] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
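# Illustrative check (the 2x2 matrix is an example chosen here, not from the
# original file): L is unit lower triangular and L @ U reconstructs the input.
matrix = np.array([[4.0, 3.0], [6.0, 3.0]] )
lower, upper = lower_upper_decomposition(matrix )
print(lower )  # [[1.  0. ] [1.5 1. ]]
print(upper )  # [[ 4.   3. ] [ 0.  -1.5]]
print(np.allclose(lower @ upper , matrix ) )  # True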
| 708 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
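# The iterator-streamer pattern exercised above, as a standalone sketch. It
# assumes the public "hf-internal-testing/tiny-random-gpt2" checkpoint is
# reachable; the worker thread runs the blocking generate() call while the
# main thread consumes decoded chunks as they arrive.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
text = "".join(streamer)  # iteration ends when generation finishes
thread.join()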
| 81 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class snake_case__(_lowerCAmelCase ):
"""simple docstring"""
lowercase_ = 'git_vision_model'
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]=768 , SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=12 , SCREAMING_SNAKE_CASE : Optional[Any]=12 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : List[str]=224 , SCREAMING_SNAKE_CASE : List[Any]=16 , SCREAMING_SNAKE_CASE : Tuple="quick_gelu" , SCREAMING_SNAKE_CASE : Any=1E-5 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Dict=0.02 , **SCREAMING_SNAKE_CASE : List[str] , ):
super().__init__(**_lowerCAmelCase )
lowercase__ : Optional[int] = hidden_size
lowercase__ : str = intermediate_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Tuple = num_channels
lowercase__ : List[str] = patch_size
lowercase__ : int = image_size
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[int] = attention_dropout
lowercase__ : Union[str, Any] = layer_norm_eps
lowercase__ : Any = hidden_act
@classmethod
def snake_case ( cls : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : int ):
cls._set_token_in_kwargs(_lowerCAmelCase )
lowercase__ , lowercase__ : Any = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
lowercase__ : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class snake_case__(_lowerCAmelCase ):
"""simple docstring"""
lowercase_ = 'git'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Optional[Any]=30_522 , SCREAMING_SNAKE_CASE : Any=768 , SCREAMING_SNAKE_CASE : Any=6 , SCREAMING_SNAKE_CASE : Optional[Any]=12 , SCREAMING_SNAKE_CASE : Any=3_072 , SCREAMING_SNAKE_CASE : Dict="gelu" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : int=1E-1_2 , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : str=101 , SCREAMING_SNAKE_CASE : Dict=102 , SCREAMING_SNAKE_CASE : str=None , **SCREAMING_SNAKE_CASE : int , ):
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
if vision_config is None:
lowercase__ : Dict = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
lowercase__ : Tuple = GitVisionConfig(**_lowerCAmelCase )
lowercase__ : List[Any] = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Dict = hidden_act
lowercase__ : Any = intermediate_size
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : List[Any] = initializer_range
lowercase__ : int = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Dict = use_cache
lowercase__ : Optional[Any] = tie_word_embeddings
lowercase__ : int = num_image_with_embedding
lowercase__ : Optional[int] = bos_token_id
lowercase__ : List[str] = eos_token_id
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.vision_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
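# A hedged usage sketch of the nested configuration defined above (keyword
# values here are illustrative, not defaults taken from a checkpoint).
from transformers import GitConfig, GitVisionConfig

vision = GitVisionConfig(image_size=224, patch_size=16)
config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
# to_dict() round-trips the nested vision config as a plain dict, as the
# serialization override above shows.
assert config.to_dict()["vision_config"]["patch_size"] == 16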
| 709 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
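# A standalone re-derivation of what the diagonal Gaussian class above computes
# (shapes are illustrative). The sample is the reparameterization
# mean + std * eps, and the KL divergence to a standard normal is
# 0.5 * sum(mean^2 + var - 1 - logvar).
import torch

params = torch.randn(1, 8, 4, 4)              # encoder moments: 2 * latent channels
mean, logvar = torch.chunk(params, 2, dim=1)
logvar = torch.clamp(logvar, -30.0, 20.0)
std = torch.exp(0.5 * logvar)

sample = mean + std * torch.randn_like(mean)  # reparameterization trick
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
print(sample.shape, kl.shape)                 # torch.Size([1, 4, 4, 4]) torch.Size([1])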
| 81 | 0 |
def __lowerCamelCase ( lowerCamelCase__ = 1 , lowerCamelCase__ = 1_000 ):
"""simple docstring"""
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = 0
for divide_by_number in range(snake_case_ , digit + 1 ):
lowercase__ : list[int] = []
lowercase__ : Any = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(snake_case_ ):
lowercase__ : Optional[Any] = len(snake_case_ )
lowercase__ : Optional[Any] = divide_by_number
else:
has_been_divided.append(snake_case_ )
lowercase__ : Tuple = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
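# A clearer sketch of the same search: perform long division by each candidate
# divisor, recording remainders until the first repeat; the divisor that
# accumulates the most distinct remainders wins (names are illustrative).
def longest_division_cycle(numerator: int = 1, digit: int = 1000) -> int:
    best_length = 0
    best_divisor = 1
    for divide_by_number in range(numerator, digit + 1):
        seen: list[int] = []
        remainder = numerator
        for _ in range(1, digit + 1):
            if remainder in seen:
                if best_length < len(seen):
                    best_length = len(seen)
                    best_divisor = divide_by_number
                break
            seen.append(remainder)
            remainder = remainder * 10 % divide_by_number
    return best_divisor

# longest_division_cycle(1, 10) == 7, since 1/7 = 0.(142857) has the longest cycle below 10.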
| 710 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
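# The scheduler-swap pattern from the 512px test above, as a standalone sketch.
# It assumes the public "facebook/DiT-XL-2-512" weights and a CUDA device;
# DPM-Solver++ usually needs far fewer steps than the default DDIM schedule.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

class_ids = pipe.get_label_ids(["vase"])  # ImageNet label strings -> class ids
image = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=25).images[0]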
| 81 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : Optional[int] = bertabert.config.encoder.vocab_size
lowercase__ : str = tokenizer.sep_token_id
lowercase__ : Any = tokenizer.cls_token_id
lowercase__ : Optional[int] = 128
lowercase__ : int = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Any = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Any = val_dataset.select(range(16 ) )
lowercase__ : List[Any] = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : Dict = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCamelCase__ , max_length=512 )
lowercase__ : Any = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCamelCase__ , max_length=128 )
lowercase__ : Optional[Any] = inputs.input_ids
lowercase__ : List[Any] = inputs.attention_mask
lowercase__ : Optional[Any] = outputs.input_ids
lowercase__ : List[Any] = outputs.input_ids.copy()
lowercase__ : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(UpperCamelCase__ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCamelCase__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : int = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[str] = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
lowercase__ : Dict = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
lowercase__ : Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase__ ) )] ) / len(UpperCamelCase__ )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Optional[int] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : Any = self.get_auto_remove_tmp_dir()
lowercase__ : Optional[Any] = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase__ , per_device_train_batch_size=UpperCamelCase__ , per_device_eval_batch_size=UpperCamelCase__ , predict_with_generate=UpperCamelCase__ , evaluation_strategy="steps" , do_train=UpperCamelCase__ , do_eval=UpperCamelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : List[str] = SeqaSeqTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , tokenizer=UpperCamelCase__ , )
# start training
trainer.train()
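# The label-masking convention from `_map_to_encoder_decoder_inputs` above, in
# isolation: pad positions become -100 so the cross-entropy loss ignores them
# (token ids below are illustrative; 0 stands in for `tokenizer.pad_token_id`).
pad_token_id = 0
batch_labels = [[101, 7592, 102, 0, 0]]
batch_labels = [
    [-100 if token == pad_token_id else token for token in labels]
    for labels in batch_labels
]
print(batch_labels)  # [[101, 7592, 102, -100, -100]]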
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
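# A standalone sketch of the custom-timesteps API validated above: an explicit,
# strictly descending schedule replaces `num_inference_steps`, and the error-path
# tests check that ascending or mixed schedules raise. Config values mirror the
# test's; the zero "model" output is a stand-in for a real network.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)
    sample = scheduler.step(noise_pred, t, sample, generator=torch.manual_seed(0)).prev_sample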
| 81 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
lowerCAmelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
lowercase_ = field(default=lowerCAmelCase__ , metadata={"""help""": """A folder containing the training data."""} )
lowercase_ = field(default=lowerCAmelCase__ , metadata={"""help""": """A folder containing the validation data."""} )
lowercase_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase_ = field(default=3_2 , metadata={"""help""": """The size of the square patches to use for masking."""} )
lowercase_ = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def snake_case ( self : Tuple ):
lowercase__ : Tuple = {}
if self.train_dir is not None:
lowercase__ : List[Any] = self.train_dir
if self.validation_dir is not None:
lowercase__ : Optional[int] = self.validation_dir
lowercase__ : Optional[int] = data_files if data_files else None
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowerCAmelCase__ )} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
lowercase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase_ = field(default=lowerCAmelCase__ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
lowercase_ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Stride to use for the encoder."""} , )
class snake_case__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE : Optional[Any]=192 , SCREAMING_SNAKE_CASE : Tuple=32 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : List[Any]=0.6 ):
lowercase__ : Any = input_size
lowercase__ : Dict = mask_patch_size
lowercase__ : Tuple = model_patch_size
lowercase__ : List[Any] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
lowercase__ : List[Any] = self.input_size // self.mask_patch_size
lowercase__ : Tuple = self.mask_patch_size // self.model_patch_size
lowercase__ : int = self.rand_size**2
lowercase__ : List[str] = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Optional[int] ):
lowercase__ : int = np.random.permutation(self.token_count )[: self.mask_count]
lowercase__ : List[str] = np.zeros(self.token_count , dtype=int )
lowercase__ : Any = 1
lowercase__ : List[str] = mask.reshape((self.rand_size, self.rand_size) )
lowercase__ : Any = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Union[str, Any] = torch.stack([example["pixel_values"] for example in examples] )
lowercase__ : Optional[Any] = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , lowerCamelCase_ , lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
lowercase__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ : List[str] = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCamelCase_ ) and data_args.train_val_split > 0.0:
lowercase__ : str = ds["train"].train_test_split(data_args.train_val_split )
lowercase__ : List[Any] = split["train"]
lowercase__ : List[Any] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : Any = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowerCamelCase_ )
elif model_args.model_name_or_path:
lowercase__ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
lowercase__ : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowerCamelCase_ , "decoder_type" ):
lowercase__ : Union[str, Any] = "simmim"
# adapt config
lowercase__ : Tuple = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase__ : Optional[int] = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase__ : Any = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase__ : Any = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCamelCase_ )
elif model_args.model_name_or_path:
lowercase__ : Dict = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCamelCase_ )
else:
lowercase__ : Union[str, Any] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase__ : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase__ : Tuple = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowercase__ : List[Any] = AutoModelForMaskedImageModeling.from_config(lowerCamelCase_ )
if training_args.do_train:
lowercase__ : Any = ds["train"].column_names
else:
lowercase__ : Optional[Any] = ds["validation"].column_names
if data_args.image_column_name is not None:
lowercase__ : Optional[int] = data_args.image_column_name
elif "image" in column_names:
lowercase__ : Optional[Any] = "image"
elif "img" in column_names:
lowercase__ : Union[str, Any] = "img"
else:
lowercase__ : str = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase__ : Any = Compose(
[
Lambda(lambda img : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase__ : str = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowerCamelCase__ ):
lowercase__ : List[Any] = [transforms(image ) for image in examples[image_column_name]]
lowercase__ : Dict = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowercase__ : Dict = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCamelCase_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowercase__ : Any = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCamelCase_ )
# Initialize our trainer
lowercase__ : Optional[int] = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
lowercase__ : str = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : Tuple = last_checkpoint
lowercase__ : Any = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("eval" , lowerCamelCase_ )
trainer.save_metrics("eval" , lowerCamelCase_ )
# Write model card and (optionally) push to hub
lowercase__ : Union[str, Any] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
if __name__ == "__main__":
main()
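# The MaskGenerator arithmetic from above as a standalone sketch (same default
# geometry: 192px input, 32px mask patches, 4px model patches, ratio 0.6).
# Each mask patch covers scale x scale model patches, so the coarse random
# mask is upsampled by repeating along both axes.
import numpy as np

input_size, mask_patch_size, model_patch_size, mask_ratio = 192, 32, 4, 0.6
rand_size = input_size // mask_patch_size       # 6 mask patches per side
scale = mask_patch_size // model_patch_size     # each spans 8x8 model patches
token_count = rand_size**2                      # 36 coarse positions
mask_count = int(np.ceil(token_count * mask_ratio))

idx = np.random.permutation(token_count)[:mask_count]
mask = np.zeros(token_count, dtype=int)
mask[idx] = 1
mask = mask.reshape(rand_size, rand_size).repeat(scale, axis=0).repeat(scale, axis=1)
print(mask.shape, round(mask.mean(), 3))        # (48, 48) 0.611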
| 712 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def _get_variance( self , state: DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get the previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
variance_type = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
variance = jnp.clip(variance , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
elif variance_type == "fixed_large":
variance = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
variance = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
min_log = variance
max_log = state.common.betas[t]
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def step( self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
t = timestep
if key is None:
key = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = state.common.alphas_cumprod[t]
alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise, also called
# "predicted x_0" in formula (15) of https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"""
" `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
split_key = jax.random.split(key , num=1 )
noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
def add_noise( self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
return add_noise_common(state.common , original_samples , noise , timesteps )
def get_velocity( self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ):
return self.config.num_train_timesteps
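# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original file): a
# minimal denoising loop driving the functional JAX API above. It
# assumes the public diffusers name `FlaxDDPMScheduler` for this
# class; the zero-valued "model output" is a stand-in so the loop is
# self-contained, where a real pipeline would call a Flax UNet.
# ------------------------------------------------------------------
if __name__ == "__main__":
    from diffusers import FlaxDDPMScheduler

    scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
    state = scheduler.create_state()  # immutable state, threaded through every call
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    key = jax.random.PRNGKey(0)
    sample = jax.random.normal(key, (1, 3, 8, 8)) * state.init_noise_sigma
    for t in state.timesteps:
        model_output = jnp.zeros_like(sample)  # stand-in for a denoiser prediction
        key, step_key = jax.random.split(key)
        sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)
    print(sample.shape)  # (1, 3, 8, 8)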
| 81 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.apply_ocr = apply_ocr
def prepare_image_processor_dict( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def setUp( self ):
self.image_processor_tester = LayoutLMv3ImageProcessingTester(self )
@property
def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , "do_resize" ) )
self.assertTrue(hasattr(image_processing , "size" ) )
self.assertTrue(hasattr(image_processing , "apply_ocr" ) )
def test_image_processor_from_dict_with_kwargs( self ):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def test_batch_feature( self ):
pass
def test_call_pil( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoding = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , list )
self.assertIsInstance(encoding.boxes , list )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def test_call_numpy( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def test_call_pytorch( self ):
# Initialize image_processing
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def test_LayoutLMv3_integration_test( self ):
# with apply_OCR = True
image_processing = LayoutLMv3ImageProcessor()
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
image = Image.open(ds[0]["file"] ).convert("RGB" )
encoding = image_processing(image , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ : str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowercase__ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE )
# with apply_OCR = False
image_processing = LayoutLMv3ImageProcessor(apply_ocr=False )
encoding = image_processing(image , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
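# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original test
# file): the processor under test in isolation. With apply_ocr=True
# (the default) it runs Tesseract and returns words/boxes alongside
# the pixel values; with apply_ocr=False it only resizes and
# normalizes. The blank image is a placeholder.
# ------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor

    image = Image.new("RGB", (640, 480), color="white")
    processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    encoding = processor(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])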
| 713 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
def __init__( self , segmentation_model: CLIPSegForImageSegmentation , segmentation_processor: CLIPSegProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ):
self.enable_attention_slicing(None )
def enable_sequential_cpu_offload( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
device = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , prompt: Union[str, List[str]] , image: Union[torch.FloatTensor, PIL.Image.Image] , text: str , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
inputs = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
outputs = self.segmentation_model(**inputs )
mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
inpainting_pipeline = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
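# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original file): one
# plausible way to drive the pipeline above, assuming it is shipped as
# the "text_inpainting" diffusers community pipeline; both checkpoint
# names are assumptions taken from the usual CLIPSeg and Stable
# Diffusion inpainting releases.
# ------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=segmentation_model,
        segmentation_processor=segmentation_processor,
    )
    image = PIL.Image.new("RGB", (512, 512), "white")  # placeholder input image
    result = pipe(image=image, text="a couch", prompt="a red leather sofa").images[0]
    result.save("text_inpainting_result.png")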
| 81 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig ):
model_type = "data2vec-text"
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig ):
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
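# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original file):
# instantiating the configuration defined above and inspecting the
# ONNX input axes; the task name follows the transformers OnnxConfig
# conventions.
# ------------------------------------------------------------------
if __name__ == "__main__":
    config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)
    onnx_config = Data2VecTextOnnxConfig(config, task="sequence-classification")
    print(config.hidden_size)        # 256
    print(dict(onnx_config.inputs))  # input_ids / attention_mask with dynamic batch and sequence axes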
| 714 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
use_conv_embed = True if "large" in model_name or "huge" in model_name else False
use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
use_layerscale = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def rename_key( name ):
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
# fmt: off
model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
checkpoint_url = model_name_to_url[model_name]
print("Checkpoint URL: " , checkpoint_url )
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val
config = get_focalnet_config(model_name )
model = FocalNetForImageClassification(config )
model.eval()
# load state dict
model.load_state_dict(state_dict )
# verify conversion
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
processor = BitImageProcessor(
do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
image = Image.open(requests.get(url , stream=True ).raw )
inputs = processor(images=image , return_tensors="pt" )
image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
original_pixel_values = image_transforms(image ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
outputs = model(**inputs )
predicted_class_idx = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.id2label[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
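# ------------------------------------------------------------------
# Example invocation (an added note, not part of the original script).
# The script filename below is an assumption; any key of
# `model_name_to_url` can be passed as --model_name:
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny
#
# Add --push_to_hub to upload the converted model and processor.
# ------------------------------------------------------------------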
| 81 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image ):
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , FutureWarning , )
if isinstance(image , torch.Tensor ):
return image
elif isinstance(image , PIL.Image.Image ):
image = [image]
if isinstance(image[0] , PIL.Image.Image ):
w, h = image[0].size
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
image = np.concatenate(image , axis=0 )
image = np.array(image ).astype(np.float32 ) / 255.0
image = image.transpose(0 , 3 , 1 , 2 )
image = 2.0 * image - 1.0
image = torch.from_numpy(image )
elif isinstance(image[0] , torch.Tensor ):
image = torch.cat(image , dim=0 )
return image
def _preprocess_mask( mask ):
if isinstance(mask , torch.Tensor ):
return mask
elif isinstance(mask , PIL.Image.Image ):
mask = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
w, h = mask[0].size
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
mask = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
mask = np.concatenate(mask , axis=0 )
mask = mask.astype(np.float32 ) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask )
elif isinstance(mask[0] , torch.Tensor ):
mask = torch.cat(mask , dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline ):
unet: UNet2DModel
scheduler: RePaintScheduler
def __init__( self , unet , scheduler ):
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
original_image = image
original_image = _preprocess_image(original_image )
original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
mask_image = _preprocess_mask(mask_image )
mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
batch_size = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
image_shape = original_image.shape
image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
self.scheduler.eta = eta
t_last = self.scheduler.timesteps[0] + 1
generator = generator[0] if isinstance(generator , list ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
model_output = self.unet(image , t ).sample
# compute previous image: x_t -> x_t-1
image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
image = self.scheduler.undo_step(image , t_last , generator )
t_last = t
image = (image / 2 + 0.5).clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
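# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original file):
# running the pipeline end to end, assuming the public diffusers names
# `RePaintPipeline` / `RePaintScheduler` and the celebahq DDPM
# checkpoint used in the RePaint docs; the blank image and mask are
# placeholders (see the docs for the keep/inpaint mask convention).
# ------------------------------------------------------------------
if __name__ == "__main__":
    import PIL.Image
    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
    original = PIL.Image.new("RGB", (256, 256), "white")
    mask = PIL.Image.new("RGB", (256, 256), "white")
    out = pipe(image=original, mask_image=mask, num_inference_steps=50, jump_length=10, jump_n_sample=10)
    out.images[0].save("repaint_result.png")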
| 715 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig ):
model_type = "informer"
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache=True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
# time series specific configuration
self.prediction_length = prediction_length
self.context_length = context_length or prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(cardinality ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
self.cardinality = cardinality
else:
self.cardinality = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
# Transformer architecture configuration
self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
# Informer
self.attention_type = attention_type
self.sampling_factor = sampling_factor
self.distil = distil
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
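# ------------------------------------------------------------------
# Usage sketch (an added example, not part of the original file):
# instantiating the configuration above with a few typical
# time-series settings; all values are illustrative.
# ------------------------------------------------------------------
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.d_model, config.attention_type)  # 64 prob
    print(config.feature_size)  # lag features plus time/static features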
| 81 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.hidden_sizes = hidden_sizes
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def create_and_check_model( self , config , pixel_values , labels ):
model = FocalNetModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def create_and_check_backbone( self , config , pixel_values , labels ):
model = FocalNetBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
config.out_features = None
model = FocalNetBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
model = FocalNetForMaskedImageModeling(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
config.num_channels = 1
model = FocalNetForMaskedImageModeling(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
config.num_labels = self.type_sequence_label_size
model = FocalNetForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
model = FocalNetForImageClassification(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self ):
self.model_tester = FocalNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
return
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_backbone( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs )
def test_for_masked_image_modeling( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def test_for_image_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def test_inputs_embeds( self ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def test_feed_forward_chunking( self ):
pass
def test_model_common_attributes( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# FocalNet has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
reshaped_hidden_states = outputs.reshaped_hidden_states
self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase__ : int = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = 3
lowercase__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
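# e.g. with illustrative sizes image_size=(32, 32) and patch_size=(3, 3):
# padded_height = 32 + 3 - (32 % 3) = 33, which is evenly divisible by the patch size.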
for model_class in self.all_model_classes[:-1]:
lowercase__ : List[str] = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@slow
def snake_case ( self : str ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = FocalNetModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.default_image_processor
lowercase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
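# ImageNet-1k class index 281 corresponds to the "tabby cat" class, hence the check below.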
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class snake_case__(__UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowercase_ = FocalNetConfig
lowercase_ = False
def snake_case ( self : Union[str, Any] ):
lowercase__ : Dict = FocalNetModelTester(self )
| 716 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
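# Illustrative sketch (not part of the original conversion script): the special-key
# branch above slices a fused attention projection of shape (3 * embed_dim, embed_dim)
# into separate query/key/value parameters. A minimal standalone version of that
# slicing, with a hypothetical helper name:
def split_fused_in_proj(in_proj_weight, in_proj_bias):
    """Split a fused QKV projection into (weight, bias) pairs for q, k and v."""
    embed_dim = in_proj_weight.shape[0] // 3
    q_w, k_w, v_w = in_proj_weight.split(embed_dim, dim=0)
    q_b, k_b, v_b = in_proj_bias.split(embed_dim, dim=0)
    return (
        (nn.Parameter(q_w), nn.Parameter(q_b)),
        (nn.Parameter(k_w), nn.Parameter(k_b)),
        (nn.Parameter(v_w), nn.Parameter(v_b)),
    )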
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
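# A minimal sketch of the lazy-import pattern used above (simplified; the real
# _LazyModule in transformers.utils handles far more): attribute access on the module
# triggers the actual import of the submodule that exports the requested name.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)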
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
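# Worked example (based only on the toy vocab/merges above): byte-level
# pre-tokenization turns " lower" into \u0120 l o w e r, and applying the merges in
# order ("\u0120 l" -> "\u0120l", "\u0120l o" -> "\u0120lo", "\u0120lo w" -> "\u0120low",
# "e r" -> "er") leaves the tokens ["\u0120low", "er"] expected by the tests below.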
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
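# The ids follow the toy vocab order defined in setUp: "\u0120low" -> 14, "er" -> 15,
# "\u0120" -> 10, "n" -> 9, "e" -> 3, "w" -> 2, and "<unk>" -> 19.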
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]=14 , SCREAMING_SNAKE_CASE : Optional[Any]=7 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[Any]=99 , SCREAMING_SNAKE_CASE : Optional[Any]=32 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : List[Any]=4 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Dict=37 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=512 , SCREAMING_SNAKE_CASE : List[str]=0.02 , ):
lowercase__ : List[Any] = parent
lowercase__ : Tuple = batch_size
lowercase__ : Tuple = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[int] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Optional[int] = rotary_dim
lowercase__ : int = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Optional[int] = initializer_range
lowercase__ : List[Any] = None
lowercase__ : Any = vocab_size - 1
lowercase__ : List[Any] = vocab_size - 1
lowercase__ : Any = vocab_size - 1
def snake_case ( self : str ):
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Tuple = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def snake_case ( self : Any ):
lowercase__ : List[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = 20
lowercase__ : List[str] = model_class_name(__A )
lowercase__ : str = model.init_cache(input_ids.shape[0] , __A )
lowercase__ : List[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
lowercase__ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase__ : int = model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
lowercase__ : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase__ : List[Any] = model(
input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , )
lowercase__ : str = model(__A )
lowercase__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
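# In words: this check mirrors incremental decoding. The first call consumes all but
# the last token and fills the key/value cache; the second call feeds only the final
# token (with its position id) against that cache, and its logits must match a full
# forward pass over the whole sequence to within 1e-3.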
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Union[str, Any] = 20
lowercase__ : Union[str, Any] = model_class_name(__A )
lowercase__ : str = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowercase__ : str = model.init_cache(input_ids.shape[0] , __A )
lowercase__ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase__ : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , )
lowercase__ : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
lowercase__ : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , )
lowercase__ : Union[str, Any] = model(__A , attention_mask=__A )
lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def snake_case ( self : Dict ):
lowercase__ : Dict = FlaxGPTJModelTester(self )
def snake_case ( self : Any ):
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__A , __A , __A , __A )
def snake_case ( self : int ):
for model_class_name in self.all_model_classes:
lowercase__ , lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__A , __A , __A , __A )
@tooslow
def snake_case ( self : str ):
lowercase__ : Dict = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
lowercase__ : str = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=__A , truncation=__A )
lowercase__ : Dict = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
lowercase__ : Tuple = False
lowercase__ : Dict = model.config.eos_token_id
lowercase__ : Optional[int] = jax.jit(model.generate )
lowercase__ : Optional[int] = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
lowercase__ : Dict = tokenizer.batch_decode(__A , skip_special_tokens=__A )
lowercase__ : List[str] = [
"Hello this is a long string of text.\n\nI\'m trying to get the text of the",
"Hey, I\'m a little late to the party. I\'m going to",
]
self.assertListEqual(__A , __A )
@is_pt_flax_cross_test
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase__ : List[str] = self._prepare_for_class(__A , __A )
lowercase__ : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : Any = getattr(__A , __A )
lowercase__ , lowercase__ : Optional[Any] = pt_inputs["input_ids"].shape
lowercase__ : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
lowercase__ : List[Any] = 0
lowercase__ : Any = 1
lowercase__ : Optional[int] = 0
lowercase__ : Tuple = 1
lowercase__ : List[str] = pt_model_class(__A ).eval()
lowercase__ : List[str] = model_class(__A , dtype=jnp.floataa )
lowercase__ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A )
lowercase__ : Optional[int] = fx_state
with torch.no_grad():
lowercase__ : str = pt_model(**__A ).to_tuple()
lowercase__ : Dict = fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__A )
lowercase__ : List[str] = model_class.from_pretrained(__A , from_pt=__A )
lowercase__ : List[Any] = fx_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def snake_case ( self : List[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase__ : Dict = self._prepare_for_class(__A , __A )
lowercase__ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ : Optional[int] = getattr(__A , __A )
lowercase__ : int = pt_model_class(__A ).eval()
lowercase__ : int = model_class(__A , dtype=jnp.floataa )
lowercase__ : Union[str, Any] = load_flax_weights_in_pytorch_model(__A , fx_model.params )
lowercase__ , lowercase__ : Dict = pt_inputs["input_ids"].shape
lowercase__ : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
lowercase__ : int = 0
lowercase__ : Optional[int] = 1
lowercase__ : int = 0
lowercase__ : List[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase__ : Tuple = pt_model(**__A ).to_tuple()
lowercase__ : Tuple = fx_model(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__A )
lowercase__ : int = pt_model_class.from_pretrained(__A , from_flax=__A )
with torch.no_grad():
lowercase__ : int = pt_model_loaded(**__A ).to_tuple()
self.assertEqual(
len(__A ) , len(__A ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__A , __A ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def snake_case ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowercase__ : List[Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
lowercase__ : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
lowerCAmelCase__ = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
lowerCAmelCase__ = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
lowerCAmelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase__ = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class snake_case__(__snake_case ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase_ = DPRContextEncoderTokenizer
class snake_case__(__snake_case ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase_ = DPRQuestionEncoderTokenizer
lowerCAmelCase__ = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase__ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase__ = r'''\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '''
@add_start_docstrings(__snake_case )
class snake_case__:
"""simple docstring"""
def __call__( self : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Union[str, Any] = None , SCREAMING_SNAKE_CASE : Union[str, Any] = False , SCREAMING_SNAKE_CASE : Any = False , SCREAMING_SNAKE_CASE : List[Any] = None , SCREAMING_SNAKE_CASE : Tuple = None , SCREAMING_SNAKE_CASE : Optional[Any] = None , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
lowercase__ : str = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
lowercase__ : str = titles if not isinstance(_lowercase , _lowercase ) else [titles]
lowercase__ : Optional[Any] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
lowercase__ : Tuple = len(_lowercase )
lowercase__ : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
assert len(_lowercase ) == len(
_lowercase ), f"""There should be as many titles as texts, but got {len(_lowercase )} titles and {len(_lowercase )} texts."""
lowercase__ : Optional[Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
lowercase__ : str = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
lowercase__ : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
lowercase__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ : str = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
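# In words: each row of input_ids is "[CLS] question [SEP] title [SEP] text" (the second
# encode call skips special tokens, so the passage text is appended raw), truncated to
# max_length when truncation is enabled; the attention mask marks non-pad positions.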
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] = 16 , SCREAMING_SNAKE_CASE : str = 64 , SCREAMING_SNAKE_CASE : List[str] = 4 , ):
lowercase__ : Union[str, Any] = reader_input["""input_ids"""]
lowercase__ : Optional[int] = reader_output[:3]
lowercase__ : int = len(_lowercase )
lowercase__ : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
lowercase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ : int = sequence_ids.index(self.pad_token_id )
else:
lowercase__ : Optional[Any] = len(_lowercase )
lowercase__ : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , ):
lowercase__ : Tuple = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase__ : str = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
lowercase__ : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
lowercase__ : List[str] = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
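# Worked example (made-up logits): with start_logits = [0.1, 2.0], end_logits = [0.5, 1.0]
# and max_answer_length = 2, the candidate spans score (0, 0): 0.6, (0, 1): 1.1 and
# (1, 1): 3.0. Sorting picks (1, 1) first, the overlap check then skips (0, 1) because
# it contains the chosen interval, and (0, 0) becomes the second selected span.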
@add_end_docstrings(__snake_case )
class snake_case__(__snake_case , __snake_case ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ = ["""input_ids""", """attention_mask"""]
lowercase_ = DPRReaderTokenizer
| 719 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
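# Worked example with this tester's defaults: image_size=30, patch_size=2 gives
# num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 gives an expected sequence
# length of ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.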
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
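# With this tester's defaults (image_size=30, patch_size=2, num_channels=3):
# num_patches = 225 and expected_num_channels = 2 ** 2 * 3 = 12.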
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
    out_1 = outputs.last_hidden_state.numpy()
    out_1[np.isnan(out_1)] = 0  # zero out NaNs so the save/load comparison below is stable
else:
    out_1 = outputs.logits.numpy()
    out_1[np.isnan(out_1)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
    out_2 = after_outputs["last_hidden_state"].numpy()
    out_2[np.isnan(out_2)] = 0
else:
    out_2 = after_outputs["logits"].numpy()
    out_2[np.isnan(out_2)] = 0
max_diff = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(max_diff , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
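# Hedged sketch: the tests above make ViTMAE's random patch masking
# deterministic by passing an explicit `noise` tensor of shape
# (batch_size, num_patches). A minimal standalone version of that setup,
# assuming the public transformers TF ViTMAE API; the shapes follow the
# integration test above (196 patches, 768 = 16**2 * 3 logit channels).
import numpy as np
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, ViTMAEConfig

config = ViTMAEConfig()
num_patches = int((config.image_size // config.patch_size) ** 2)

np.random.seed(2)  # same seed the tests use for a reproducible mask
noise = tf.convert_to_tensor(np.random.uniform(size=(1, num_patches)))

model = TFViTMAEForPreTraining(config)
pixel_values = tf.random.uniform((1, config.num_channels, config.image_size, config.image_size))
outputs = model(pixel_values, noise=noise)  # masking is now deterministic
print(outputs.logits.shape)  # (1, 196, 768) with the default config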
| 81 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
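# Usage sketch (illustrative values only): floats_list builds nested Python
# lists of uniform floats that the testers below feed to the feature extractor.
#
#     waveform = floats_list((1, 800))[0]                            # one 800-sample input
#     batch = [floats_list((1, n))[0] for n in (800, 1_000, 1_200)]  # ragged batch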
@require_torch
@require_torchaudio
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=7 , SCREAMING_SNAKE_CASE : Optional[Any]=400 , SCREAMING_SNAKE_CASE : Tuple=2_000 , SCREAMING_SNAKE_CASE : Dict=10 , SCREAMING_SNAKE_CASE : int=160 , SCREAMING_SNAKE_CASE : Tuple=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=4_000 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[Any]=True , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : int = min_seq_length
lowercase__ : Optional[Any] = max_seq_length
lowercase__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : List[str] = padding_value
lowercase__ : List[str] = sampling_rate
lowercase__ : Optional[int] = return_attention_mask
lowercase__ : Any = do_normalize
lowercase__ : Dict = feature_size
lowercase__ : Optional[Any] = chunk_length
lowercase__ : Any = hop_length
def snake_case ( self : Tuple ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : List[str]=False ):
def _flatten(SCREAMING_SNAKE_CASE : Any ):
return list(itertools.chain(*A__ ) )
if equal_length:
lowercase__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__ : Tuple = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = WhisperFeatureExtractor if is_speech_available() else None
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = WhisperFeatureExtractionTester(self )
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Union[str, Any] = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
lowercase__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(A__ )
lowercase__ : str = feat_extract_first.to_dict()
lowercase__ : Union[str, Any] = feat_extract_second.to_dict()
lowercase__ : Optional[int] = feat_extract_first.mel_filters
lowercase__ : List[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def snake_case ( self : str ):
lowercase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Union[str, Any] = os.path.join(A__ , "feat_extract.json" )
feat_extract_first.to_json_file(A__ )
lowercase__ : str = self.feature_extraction_class.from_json_file(A__ )
lowercase__ : Dict = feat_extract_first.to_dict()
lowercase__ : Tuple = feat_extract_second.to_dict()
lowercase__ : Union[str, Any] = feat_extract_first.mel_filters
lowercase__ : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertEqual(A__ , A__ )
def snake_case ( self : Union[str, Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowercase__ : int = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test feature size
lowercase__ : Union[str, Any] = feature_extractor(A__ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowercase__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
lowercase__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(A__ , A__ , atol=1E-3 ) )
# Test batched
lowercase__ : Any = feature_extractor(A__ , return_tensors="np" ).input_features
lowercase__ : Optional[Any] = feature_extractor(A__ , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(A__ , A__ ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : List[Any] = np.asarray(A__ )
lowercase__ : List[str] = feature_extractor(A__ , return_tensors="np" ).input_features
lowercase__ : str = feature_extractor(A__ , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(A__ , A__ ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test truncation required
lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowercase__ : Tuple = [np.asarray(A__ ) for speech_input in speech_inputs]
lowercase__ : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowercase__ : List[str] = [np.asarray(A__ ) for speech_input in speech_inputs_truncated]
lowercase__ : Any = feature_extractor(A__ , return_tensors="np" ).input_features
lowercase__ : str = feature_extractor(A__ , return_tensors="np" ).input_features
for enc_seq_1, enc_seq_2 in zip(A__ , A__ ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def snake_case ( self : Union[str, Any] ):
import torch
lowercase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Optional[int] = np.random.rand(100 , 32 ).astype(np.float64 )
lowercase__ : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ : str = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
lowercase__ : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowercase__ : Optional[int] = ds.sort("id" ).select(range(A__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def snake_case ( self : Any ):
# fmt: off
lowercase__ : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowercase__ : Any = self._load_datasamples(1 )
lowercase__ : Optional[Any] = WhisperFeatureExtractor()
lowercase__ : Union[str, Any] = feature_extractor(A__ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1E-4 ) )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Optional[Any] = self._load_datasamples(1 )[0]
lowercase__ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowercase__ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0]
self.assertTrue(np.all(np.mean(A__ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1E-3 ) )
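# Standalone sketch of the zero-mean/unit-variance normalisation the last test
# exercises. This mirrors the property being asserted above (mean ~ 0 and
# variance ~ 1 after rescaling), not the exact transformers implementation,
# which also handles attention masks and padded batches.
import numpy as np

def zero_mean_unit_var_norm(values: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (values - values.mean()) / np.sqrt(values.var() + eps)

audio = np.random.rand(16_000).astype(np.float64) * 65_535  # badly scaled input
normed = zero_mean_unit_var_norm(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3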
| 720 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
    raise ValueError(
        "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
        f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
    raise ValueError(
        "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
        f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
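# Hypothetical round-trip sketch for the nested configs above, assuming the
# upstream transformers names (EsmConfig / EsmFoldConfig / TrunkConfig) and
# the upstream field name `num_blocks`; only `trunk` is visible in the code
# above, so treat the rest as assumptions.
config = EsmConfig(is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
d = config.to_dict()  # nested dataclasses are flattened into plain dicts
assert d["esmfold_config"]["trunk"]["num_blocks"] == 4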
| 81 | 0 |
def circle_sort(collection: list) -> list:
    """Sorts `collection` in place with the circle sort algorithm and returns it.

    Each pass compares and swaps mirrored pairs (first/last, second/second-to-last,
    ...), then recurses on both halves; passes repeat until no swap occurs.
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circle pass over collection[low:high + 1]; returns True if anything swapped."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
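# Quick property check for circle_sort (hypothetical helper, not part of the
# original module): compare against Python's built-in sorted() on random data.
# Note that circle_sort mutates its input, so the check passes a copy.
def _check_circle_sort(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 30))]
        assert circle_sort(list(data)) == sorted(data)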
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
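# Hypothetical construction sketch, assuming the upstream class name
# DeformableDetrConfig for the config above. The two-stage variant must also
# enable box refinement, exactly as the validation in __init__ enforces.
config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
assert config.hidden_size == config.d_model  # attribute_map alias
assert config.num_attention_heads == config.encoder_attention_heads  # property alias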
| 81 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
def __lowerCamelCase ( *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(_lowerCamelCase , ["torch"] )
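# Every placeholder class in this module follows the same pattern: a metaclass
# plus requires_backends() so that touching a torch-only object without torch
# installed raises a clear ImportError instead of a NameError. A minimal
# illustrative sketch of the idea (names here are hypothetical, not the actual
# transformers internals):
#
#     class DummyObject(type):
#         def __getattr__(cls, key):
#             raise ImportError(f"{cls.__name__} requires the `torch` backend.")
#
#     class SomeTorchOnlyModel(metaclass=DummyObject):
#         def __init__(self, *args, **kwargs):
#             raise ImportError("SomeTorchOnlyModel requires the `torch` backend.")
#
#     SomeTorchOnlyModel.from_pretrained  # -> ImportError at attribute access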
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : List[str] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : str ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
class snake_case__(metaclass=_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls : str , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Tuple ):
requires_backends(cls , ["torch"] )
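# Note: each placeholder class above follows the library's "dummy object"
# pattern. When torch is not installed, instantiating one of them (or calling
# either masked classmethod, which in the upstream library corresponds to
# from_config and from_pretrained) raises an informative backend error via
# requires_backends instead of an opaque ImportError at import time.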
| 700 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
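# Worked example of the padding rule above: for old_height = 100 and size = 8,
# pad_height = (100 // 8 + 1) * 8 - 100 = 4, growing the image to the next
# multiple of 8 (104). Note that an exact multiple (old_height = 96) still
# gains a full extra block of 8 rows, which follows directly from the formula.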
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 81 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCAmelCase__ = {
"""n_samples""": 6_4,
"""horizon""": 3_2,
"""num_inference_steps""": 2_0,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
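# A rough sketch of the guidance knobs, based on the experimental
# ValueGuidedRLPipeline: `n_guide_steps` is the number of value-function
# gradient updates applied at each denoising step, `scale` sets the strength
# of that nudge, and `t_grad_cutoff` zeroes the guidance gradient once the
# timestep drops below it.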
if __name__ == "__main__":
lowerCAmelCase__ = """hopper-medium-v2"""
lowerCAmelCase__ = gym.make(env_name)
lowerCAmelCase__ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
lowerCAmelCase__ = env.reset()
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1_0_0_0
lowerCAmelCase__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCAmelCase__ = pipeline(obs, planning_horizon=3_2)
# execute action in environment
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = env.step(denorm_actions)
lowerCAmelCase__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCAmelCase__ = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 701 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 0 |
def __lowerCamelCase ( lowerCamelCase__ = 3 , lowerCamelCase__ = 7 , lowerCamelCase__ = 1_000_000 ):
"""simple docstring"""
lowercase__ : Dict = 0
lowercase__ : List[str] = 1
for current_denominator in range(1 , limit + 1 ):
lowercase__ : Any = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
lowercase__ : Optional[Any] = current_numerator
lowercase__ : Optional[Any] = current_denominator
return max_numerator
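# Worked example: solution(numerator=3, denominator=7, limit=8) returns 2,
# because among all fractions n/d < 3/7 with d <= 8 the largest is 2/5
# (2/5 = 0.4 beats the runner-up 3/8 = 0.375).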
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 702 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def fn(lowerCamelCase__ ):
return tokenizer(examples["text"] )
return fn
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
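# A minimal read-back sketch (the file name and feature spec below are
# illustrative, not part of this script):
#
# feature_spec = {
#     "input_ids": tf.io.VarLenFeature(tf.int64),
#     "attention_mask": tf.io.VarLenFeature(tf.int64),
# }
# ds = tf.data.TFRecordDataset(["dataset-0-1000.tfrecord"]).map(
#     lambda rec: tf.io.parse_single_example(rec, feature_spec)
# )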
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
| 81 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __lowerCamelCase ( lowerCamelCase__ = 3 ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , str ):
raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(SCREAMING_SNAKE_CASE_ ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
lowercase__ : Any = QuantumRegister(SCREAMING_SNAKE_CASE_ , "qr" )
lowercase__ : List[Any] = ClassicalRegister(SCREAMING_SNAKE_CASE_ , "cr" )
lowercase__ : Optional[int] = QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# simulate with 10000 shots
lowercase__ : str = Aer.get_backend("qasm_simulator" )
lowercase__ : List[Any] = execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=10_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
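# Sanity check: on the default all-zero register the QFT produces a uniform
# superposition, so the ideal simulator should report all eight 3-bit
# outcomes with roughly equal counts (about 1_250 each of the 10_000 shots).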
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 703 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(SCREAMING_SNAKE_CASE ),
*get_values(SCREAMING_SNAKE_CASE ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
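# The first reported hidden state sits after the 4x-downsampling patchify
# stem of the ConvNeXt architecture, hence the expected spatial size of
# image_size // 4 on both axes.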
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(__A , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : List[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
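# "#version: 0.2" is the conventional header line of a GPT-2 merges file; each
# later entry names a pair of symbols that BPE may fuse, with "\u0120" serving
# as the byte-level marker for a leading space.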
lowercase__ : List[str] = {'unk_token': '<unk>'}
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = 'lower newer'
lowercase__ : Optional[int] = 'lower newer'
return input_text, output_text
def snake_case ( self : int ):
lowercase__ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Any = 'lower newer'
lowercase__ : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase__ : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tokens + [tokenizer.unk_token]
lowercase__ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
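# The expected ids follow the toy vocab order defined in setUp: "\u0120low" -> 14,
# "er" -> 15, "\u0120" -> 10, "n" -> 9, "e" -> 3, "w" -> 2, "er" -> 15, and the
# appended unk token -> 19.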
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : Dict = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = 'lower newer'
# Testing tokenization
lowercase__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : Any = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
pass
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : List[str] = 'This is a simple input'
lowercase__ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Optional[int] = ('This is a simple input', 'This is a pair')
lowercase__ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Any = 'This is a simple input'
lowercase__ : Dict = ['This is a simple input looooooooong', 'This is a simple input']
lowercase__ : Optional[Any] = ('This is a simple input', 'This is a pair')
lowercase__ : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = '$$$'
lowercase__ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = 'This is a simple input'
lowercase__ : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : List[Any] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : Dict = tokenizer.decode(out_s.input_ids )
lowercase__ : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : int ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ : List[Any] = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : Dict = 'Encode this.'
lowercase__ : Any = 'This one too please.'
lowercase__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Union[str, Any] = encoded_sequence_dict['input_ids']
lowercase__ : Any = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Optional[int] = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = 'A photo of a cat'
lowercase__ : Optional[int] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : List[str] = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Tuple ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = 'A photo of a cat'
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = 'bos'
lowercase__ : List[Any] = tokenizer.get_vocab()['bos']
lowercase__ : Any = 'A photo of a cat'
lowercase__ : int = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Optional[int] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
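# -100 is the ignore_index of the cross-entropy loss, so padded label
# positions contribute nothing to the seq2seq training loss.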
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class snake_case__:
"""simple docstring"""
lowercase_ = BlenderbotConfig
lowercase_ = {}
lowercase_ = """gelu"""
def __init__( self : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str=13 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : Dict=99 , SCREAMING_SNAKE_CASE : Optional[Any]=32 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=37 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=20 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , ):
lowercase__ : int = parent
lowercase__ : int = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Tuple = is_training
lowercase__ : List[str] = use_labels
lowercase__ : Dict = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : int = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[Any] = eos_token_id
lowercase__ : str = pad_token_id
lowercase__ : Optional[Any] = bos_token_id
def snake_case ( self : str ):
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase__ : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : List[str] = TFBlenderbotModel(config=_A ).get_decoder()
lowercase__ : Optional[Any] = inputs_dict["input_ids"]
lowercase__ : Union[str, Any] = input_ids[:1, :]
lowercase__ : Any = inputs_dict["attention_mask"][:1, :]
lowercase__ : Optional[Any] = inputs_dict["head_mask"]
lowercase__ : Any = 1
# first forward pass
lowercase__ : Optional[int] = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
lowercase__ , lowercase__ : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase__ : Union[str, Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase__ : List[Any] = model(_A , attention_mask=_A )[0]
lowercase__ : Union[str, Any] = model(_A , attention_mask=_A , past_key_values=_A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase__ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
lowercase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_A , _A , rtol=1E-3 )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ):
"""simple docstring"""
if attention_mask is None:
lowercase__ : List[Any] = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase__ : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
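# The first decoder position (the decoder start token) is always attended to;
# every later position is masked wherever it equals the pad token id.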
if head_mask is None:
lowercase__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case__(__lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowercase_ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowercase_ = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def snake_case ( self : Any ):
lowercase__ : Dict = TFBlenderbotModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=_A )
def snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Tuple ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_tokenizers
@require_tf
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = ["""My friends are cool but they eat too many carbs."""]
lowercase_ = """facebook/blenderbot-400M-distill"""
@cached_property
def snake_case ( self : Any ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case ( self : Tuple ):
lowercase__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case ( self : Tuple ):
lowercase__ : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
lowercase__ : List[str] = self.model.generate(
model_inputs.input_ids , )
lowercase__ : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_A )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 705 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
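# Illustrative sketch of the fused-QKV slicing performed above, on a toy
# tensor (a hypothetical hidden size of 4 is assumed; illustrative only and
# safe to remove):
def _demo_split_qkv(hidden_size: int = 4) -> None:
    fused_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = fused_weight[:hidden_size, :]  # first third of the rows
    key = fused_weight[hidden_size : hidden_size * 2, :]  # middle third
    value = fused_weight[-hidden_size:, :]  # last third
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)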
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
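# Sanity checks for the renaming logic above (illustrative, not exhaustive;
# the example keys are hypothetical timm-style names, not taken from a real
# checkpoint):
def _demo_rename_key() -> None:
    assert rename_key("backbone.cls_token") == "vit.embeddings.cls_token"
    assert rename_key("blocks.0.attn.proj.weight") == "encoder.layer.0.attention.output.dense.weight"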
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
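# Illustrative arithmetic for the expected sequence length computed above
# (numbers mirror the tester defaults; this helper is illustrative only):
def _demo_expected_seq_len(image_size=(30, 30), patch_size=2, num_detection_tokens=10) -> int:
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)  # 15 * 15 = 225
    return num_patches + 1 + num_detection_tokens  # 225 patches + [CLS] + 10 detection tokens = 236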
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection's head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
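# Illustrative sketch (a simplified stand-in, not transformers' actual
# _LazyModule implementation): the pattern above defers each submodule import
# until an attribute is first requested, which importlib can emulate directly.
def _demo_lazy_attr(module_name: str, attr_name: str):
    import importlib

    module = importlib.import_module(module_name)  # imported only when this is called
    return getattr(module, attr_name)
# e.g. _demo_lazy_attr("json", "dumps") resolves json.dumps on demand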
| 81 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in at most one position.

    Returns the merged string with "_" at the differing bit, or False when the
    strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)
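# Illustrative check of the merge rule above (not part of the original
# algorithm; safe to remove):
def _demo_compare_string() -> None:
    assert compare_string("0010", "0110") == "0_10"  # one differing bit merges into "_"
    assert compare_string("0110", "1001") is False  # more than one bit differs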
def check(binary: list[str]) -> list[str]:
    """Iteratively merge minterms until no further merging is possible; the
    strings that never merge are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merge: mark both as covered, keep the merged term
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
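# Illustrative example of the conversion above (assumed inputs: 3 variables,
# minterms 1 and 5; safe to remove):
def _demo_decimal_to_binary() -> None:
    assert decimal_to_binary(3, [1, 5]) == ["001", "101"]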
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants first, then greedily cover the rest."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily take the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)  # minterms are whole numbers; parsing as int keeps the binary strings well-formed
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays of shape (3, 30, 400)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
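# Illustrative sketch of what the processor does with the two modalities
# tested above (a plain-dict stand-in, not the real CLIPProcessor API):
def _demo_processor_merge() -> dict:
    text_features = {"input_ids": [[1, 4, 5]], "attention_mask": [[1, 1, 1]]}  # from the tokenizer
    image_features = {"pixel_values": [[0.0]]}  # from the image processor
    batch = {**text_features, **image_features}  # one batch, keyed for the CLIP model
    assert sorted(batch) == ["attention_mask", "input_ids", "pixel_values"]
    return batch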
| 81 | 0 |
from ..utils import DummyObject, requires_backends


# Each stub below raises a clear error when instantiated without the
# `sentencepiece` backend installed. The original file defines one
# identically-shaped stub per sentencepiece-backed tokenizer class; the
# concrete class names were lost in this dump, so a placeholder name is kept.
class snake_case__(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
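# Illustrative sketch (a simplified stand-in, not transformers' actual
# DummyObject) of how a metaclass can surface a missing backend at
# instantiation time instead of at import time:
class _DemoDummyMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the sentencepiece backend.")


class _DemoTokenizerStub(metaclass=_DemoDummyMeta):
    pass  # _DemoTokenizerStub() raises immediately instead of failing later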
| 708 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
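# Illustrative sketch (a generic stand-in, not the real TextIteratorStreamer)
# of the producer/consumer pattern the tests above rely on: a worker thread
# pushes text chunks into a queue that the main thread drains until a sentinel
# value arrives.
def _demo_iterator_streamer() -> str:
    from queue import Queue

    q: Queue = Queue()
    sentinel = None

    def produce() -> None:
        for chunk in ("Hello", ", ", "world"):
            q.put(chunk)
        q.put(sentinel)  # signals the consumer to stop

    Thread(target=produce).start()
    return "".join(iter(q.get, sentinel))  # -> "Hello, world"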
| 81 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 709 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The decoded output sample from the last layer of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
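# Illustrative sketch (a minimal stand-in, not diffusers' public API) of the
# create_custom_forward / torch.utils.checkpoint pattern used in the forward
# pass above: activations are recomputed during backward instead of stored.
def _demo_checkpointed_block():
    block = nn.Linear(4, 4)
    x = torch.randn(2, 4, requires_grad=True)

    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)

        return custom_forward

    out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
    out.sum().backward()
    return x.grad.shape  # gradients flow as usual: torch.Size([2, 4])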
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Conv2d(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Conv2d(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
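# A self-contained sketch of the quantization step implemented above: nearest
# codebook lookup, commitment loss, and the straight-through gradient. The
# shapes and the 0.25 beta are illustrative assumptions, not class defaults.
def _vector_quantize_sketch():
    import torch

    codebook = torch.nn.Embedding(16, 4)  # n_e=16 codes of dimension 4
    z = torch.randn(2, 8, 8, 4, requires_grad=True)  # channels-last latents
    flat = z.reshape(-1, 4)
    # nearest codebook entry per latent vector
    indices = torch.argmin(torch.cdist(flat, codebook.weight), dim=1)
    z_q = codebook(indices).view(z.shape)
    # pull encoder outputs and codebook entries toward each other
    loss = torch.mean((z_q.detach() - z) ** 2) + 0.25 * torch.mean((z_q - z.detach()) ** 2)
    # straight-through estimator: forward pass uses z_q, gradients flow to z
    z_q = z + (z_q - z).detach()
    return z_q, loss, indices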
class snake_case__(object ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
self.mean , self.logvar = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
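# A worked sketch of what the distribution class above computes: `parameters`
# packs mean and log-variance along the channel axis, sampling uses the
# reparameterization trick, and the KL term is the closed form against N(0, I).
def _diagonal_gaussian_sketch():
    import torch

    parameters = torch.randn(1, 8, 4, 4)  # 4 mean channels + 4 logvar channels
    mean, logvar = torch.chunk(parameters, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(mean)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return sample, kl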
| 81 | 0 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """simple docstring"""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v):
    """simple docstring"""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            # binary search for the ceiling of v[i] in tail[0:length]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
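    # Quick sanity check for the functions above: the classic sequence below has
    # a longest strictly increasing subsequence of length 6 ([2, 3, 7, 8, 10, 13]).
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6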
| 710 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
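# A hedged usage sketch mirroring the slow tests above; the checkpoint id and
# step count come from those tests, and downloading the weights is required.
def _dit_generation_sketch():
    import torch
    from diffusers import DiTPipeline, DPMSolverMultistepScheduler

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    generator = torch.manual_seed(0)
    return pipe(class_ids, generator=generator, num_inference_steps=25).images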
| 81 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class snake_case__(PretrainedConfig ):
"""simple docstring"""
lowercase_ = '''realm'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int]=30_522 , SCREAMING_SNAKE_CASE : Any=768 , SCREAMING_SNAKE_CASE : int=128 , SCREAMING_SNAKE_CASE : List[Any]=12 , SCREAMING_SNAKE_CASE : List[Any]=12 , SCREAMING_SNAKE_CASE : Dict=8 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Tuple="gelu_new" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : str=512 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE : List[str]=1E-1_2 , SCREAMING_SNAKE_CASE : Tuple=256 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : Tuple=1E-3 , SCREAMING_SNAKE_CASE : str=5 , SCREAMING_SNAKE_CASE : Union[str, Any]=320 , SCREAMING_SNAKE_CASE : Dict=13_353_718 , SCREAMING_SNAKE_CASE : int=5_000 , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : Dict=2 , **SCREAMING_SNAKE_CASE : Tuple , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
# Common config
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : Dict = hidden_size
lowercase__ : Tuple = retriever_proj_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = num_candidates
lowercase__ : Dict = intermediate_size
lowercase__ : int = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Any = type_vocab_size
lowercase__ : Dict = layer_norm_eps
# Reader config
lowercase__ : int = span_hidden_size
lowercase__ : Union[str, Any] = max_span_width
lowercase__ : List[Any] = reader_layer_norm_eps
lowercase__ : str = reader_beam_size
lowercase__ : Any = reader_seq_len
# Retrieval config
lowercase__ : Optional[Any] = num_block_records
lowercase__ : int = searcher_beam_size
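# A minimal instantiation sketch, assuming the class above is transformers'
# RealmConfig; the two overrides shown are arbitrary example values.
def _realm_config_sketch():
    from transformers import RealmConfig

    config = RealmConfig(num_candidates=4, reader_beam_size=3)
    return config.hidden_size, config.retriever_proj_size  # 768 and 128 by default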
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(SchedulerCommonTest ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
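# A minimal denoising-loop sketch for the scheduler under test, exercising the
# custom-timestep path from above; the zero tensor stands in for a real model.
def _cm_scheduler_sketch():
    import torch
    from diffusers import CMStochasticIterativeScheduler

    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(timesteps=[106, 0])  # must be strictly descending
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # stand-in for a trained model
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample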
| 81 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """simple docstring"""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    """simple docstring"""
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
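# A toy demonstration of the rule matching above on a hand-built parameter
# tree; the keys imitate the GPT-J layout these rules were written for.
def _partition_demo():
    params = {
        "transformer": {
            "wte": {"embedding": 0},
            "h": {"0": {"attention": {"out_proj": {"kernel": 0, "bias": 0}}}},
        }
    }
    spec = set_partitions(params)
    # e.g. spec["transformer"]["wte"]["embedding"] is P("mp", None)
    return spec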
| 712 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def create( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(FlaxSchedulerMixin , ConfigMixin ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.float32 , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
model_output , predicted_variance = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
    f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"""
    " `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
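# A plain-numpy worked example of the posterior-mean coefficients computed in
# `step` above (formula (7) of https://arxiv.org/pdf/2006.11239.pdf); the
# linear beta schedule matches the config defaults.
def _posterior_coeff_sketch():
    import numpy as np

    betas = np.linspace(0.0001, 0.02, 1000)
    alphas = 1.0 - betas
    alphas_cumprod = np.cumprod(alphas)
    t = 500
    alpha_prod_t, alpha_prod_t_prev = alphas_cumprod[t], alphas_cumprod[t - 1]
    coeff_x0 = (alpha_prod_t_prev**0.5 * betas[t]) / (1 - alpha_prod_t)
    coeff_xt = alphas[t] ** 0.5 * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)
    # posterior mean: mu_t = coeff_x0 * x_0 + coeff_xt * x_t
    return coeff_x0, coeff_xt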
| 81 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class snake_case__(PretrainedConfig ):
"""simple docstring"""
lowercase_ = """speech_to_text"""
lowercase_ = ["""past_key_values"""]
lowercase_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int]=10_000 , SCREAMING_SNAKE_CASE : str=12 , SCREAMING_SNAKE_CASE : List[Any]=2_048 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : Optional[Any]=6 , SCREAMING_SNAKE_CASE : int=2_048 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : Optional[Any]=256 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : List[str]=6_000 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[int]=(5, 5) , SCREAMING_SNAKE_CASE : int=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=80 , SCREAMING_SNAKE_CASE : Any=1 , **SCREAMING_SNAKE_CASE : int , ):
lowercase__ : List[Any] = vocab_size
lowercase__ : List[str] = d_model
lowercase__ : Dict = encoder_ffn_dim
lowercase__ : Any = encoder_layers
lowercase__ : List[Any] = encoder_attention_heads
lowercase__ : int = decoder_ffn_dim
lowercase__ : Any = decoder_layers
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = dropout
lowercase__ : Dict = attention_dropout
lowercase__ : Any = activation_dropout
lowercase__ : Tuple = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = encoder_layerdrop
lowercase__ : Any = decoder_layerdrop
lowercase__ : str = use_cache
lowercase__ : int = encoder_layers
lowercase__ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ : Any = max_source_positions
lowercase__ : Optional[int] = max_target_positions
lowercase__ : Tuple = num_conv_layers
lowercase__ : Union[str, Any] = list(lowerCamelCase_ )
lowercase__ : Dict = conv_channels
lowercase__ : Any = input_feat_per_channel
lowercase__ : Dict = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
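# A small sketch of the consistency check above, assuming the class is
# transformers' Speech2TextConfig: kernel-size and layer counts must agree.
def _s2t_config_sketch():
    from transformers import Speech2TextConfig

    ok = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
    try:
        Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))
    except ValueError:
        pass  # len(conv_kernel_sizes) must equal num_conv_layers
    return ok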
| 713 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(DiffusionPipeline ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNet2DConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
    f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
    f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
    "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
    " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
    " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
    " file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
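# A hedged two-stage sketch of what the combined pipeline above automates:
# CLIPSeg turns a text prompt into a mask, which then drives ordinary
# inpainting. The checkpoint ids are common public ones, assumed here.
def _text_guided_inpaint_sketch(image, mask_text, inpaint_prompt):
    import torch
    from PIL import Image
    from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
    from diffusers import StableDiffusionInpaintPipeline

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    segmenter = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    inputs = processor(text=[mask_text], images=[image], padding="max_length", return_tensors="pt")
    with torch.no_grad():
        mask = torch.sigmoid(segmenter(**inputs).logits).squeeze().numpy()
    mask_pil = Image.fromarray((mask * 255).astype("uint8")).resize(image.size)
    pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
    return pipe(prompt=inpaint_prompt, image=image, mask_image=mask_pil).images[0]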
| 81 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=7 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : int=99 , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : str=5 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : str=512 , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : Optional[Any]="last" , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : List[Any]=0 , ):
lowercase__ : Dict = parent
lowercase__ : str = batch_size
lowercase__ : List[str] = seq_length
lowercase__ : str = is_training
lowercase__ : Dict = use_input_lengths
lowercase__ : List[Any] = use_token_type_ids
lowercase__ : str = use_labels
lowercase__ : Union[str, Any] = gelu_activation
lowercase__ : int = sinusoidal_embeddings
lowercase__ : List[Any] = causal
lowercase__ : List[Any] = asm
lowercase__ : Dict = n_langs
lowercase__ : Optional[int] = vocab_size
lowercase__ : List[str] = n_special
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : int = max_position_embeddings
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Optional[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : str = num_choices
lowercase__ : Union[str, Any] = summary_type
lowercase__ : Dict = use_proj
lowercase__ : Dict = scope
lowercase__ : List[Any] = bos_token_id
def snake_case ( self : int ):
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = None
if self.use_input_lengths:
lowercase__ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : str = None
if self.use_token_type_ids:
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : List[Any] = None
lowercase__ : Union[str, Any] = None
lowercase__ : str = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self : Optional[int] ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , ):
lowercase__ : str = XLMModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : Optional[Any] = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ )
lowercase__ : int = model(UpperCamelCase_ , langs=UpperCamelCase_ )
lowercase__ : Union[str, Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , ):
lowercase__ : Any = XLMWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , ):
lowercase__ : Tuple = XLMForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : Tuple = model(UpperCamelCase_ )
lowercase__ : Optional[Any] = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
lowercase__ : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , ):
lowercase__ : Dict = XLMForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : Any = model(UpperCamelCase_ )
lowercase__ : int = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
lowercase__ : List[Any] = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
(lowercase__ , ) = result_with_labels.to_tuple()
lowercase__ : Union[str, Any] = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
(lowercase__ , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ):
lowercase__ : List[str] = XLMForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : Tuple = model(UpperCamelCase_ )
lowercase__ : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , ):
lowercase__ : List[Any] = self.num_labels
lowercase__ : Any = XLMForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , ):
lowercase__ : Tuple = self.num_choices
lowercase__ : List[Any] = XLMForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class snake_case__(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slow tokenizer is used.
# (The slow tokenizers were never used for pipeline tests before the pipeline testing rework.)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = XLMModelTester(self )
self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
def snake_case ( self : Any ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs )
def snake_case ( self : Tuple ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
def snake_case ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
def snake_case ( self : str ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
def snake_case ( self : Tuple ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
def snake_case ( self : Optional[Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
def snake_case ( self : Any ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
def _check_attentions_for_generate( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(attentions , tuple )
self.assertListEqual(
[isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(attentions ):
# adds PAD dummy token
tgt_len = min_length + idx + 1
src_len = min_length + idx + 1
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
def _check_hidden_states_for_generate( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
self.assertIsInstance(hidden_states , tuple )
self.assertListEqual(
[isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(hidden_states ):
# adds PAD dummy token
seq_len = min_length + idx + 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
def snake_case ( self : Union[str, Any] ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = XLMModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : Union[str, Any] ):
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(torch_device )
input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device ) # the president
expected_output_ids = [14, 447] * 10 # "the president" repeated ten times
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 714 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
"""simple docstring"""
depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
use_conv_embed = True if "large" in model_name or "huge" in model_name else False
use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
use_layerscale = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
focal_levels = [3, 3, 3, 3]
focal_windows = [5, 5, 5, 5]
elif "fl4" in model_name:
focal_levels = [4, 4, 4, 4]
focal_windows = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
focal_windows = [3, 3, 3, 3]
if "lrf" in model_name:
focal_levels = [3, 3, 3, 3]
else:
focal_levels = [2, 2, 2, 2]
if "tiny" in model_name:
embed_dim = 96
elif "small" in model_name:
embed_dim = 96
elif "base" in model_name:
embed_dim = 128
elif "large" in model_name:
embed_dim = 192
elif "xlarge" in model_name:
embed_dim = 256
elif "huge" in model_name:
embed_dim = 352
# set label information
repo_id = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
filename = "imagenet-22k-id2label.json"
else:
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = FocalNetConfig(
embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
return config
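# Illustrative check of the branching above (hypothetical model name, not part of
# the original script):
# config = get_focalnet_config("focalnet-tiny-lrf")
# config.depths -> [2, 2, 6, 2]; config.focal_levels -> [3, 3, 3, 3]; config.embed_dim -> 96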
def rename_key( name ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
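# Illustrative trace of the substitutions above on a hypothetical checkpoint key:
# "layers.0.blocks.1.modulation.f.weight"
# -> "encoder.layers.0.blocks.1.modulation.f.weight"   ("layers" gets the "encoder." prefix)
# -> "encoder.stages.0.blocks.1.modulation.f.weight"   ("encoder.layers" -> "encoder.stages")
# -> "encoder.stages.0.layers.1.modulation.f.weight"   ("blocks" -> "layers")
# -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
# -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight" (non-head prefix)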
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
"""simple docstring"""
model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
checkpoint_url = model_name_to_url[model_name]
print("Checkpoint URL: " , checkpoint_url )
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val
config = get_focalnet_config(model_name )
model = FocalNetForImageClassification(config )
model.eval()
# load state dict
model.load_state_dict(state_dict )
# verify conversion
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
processor = BitImageProcessor(
do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
image = Image.open(requests.get(url , stream=True ).raw )
inputs = processor(images=image , return_tensors="pt" )
image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
original_pixel_values = image_transforms(image ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
outputs = model(**inputs )
predicted_class_idx = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.id2label[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ):
"""simple docstring"""
ar = F"""{sampling_rate}"""
ac = "1"
format_for_conversion = "f32le"
ffmpeg_command = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
output_stream = ffmpeg_process.communicate(bpayload )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
out_bytes = output_stream[0]
audio = np.frombuffer(out_bytes , np.float32 )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
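# Minimal usage sketch (assumes ffmpeg is installed and a local file exists;
# the filename is illustrative):
# with open("sample.flac", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16_000)
# `audio` is then a 1-D np.float32 waveform resampled to 16 kHz mono.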
def ffmpeg_microphone( sampling_rate , chunk_length_s , format_for_conversion="f32le" , ):
"""simple docstring"""
ar = F"""{sampling_rate}"""
ac = "1"
if format_for_conversion == "s16le":
size_of_sample = 2
elif format_for_conversion == "f32le":
size_of_sample = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
system = platform.system()
if system == "Linux":
format_ = "alsa"
input_ = "default"
elif system == "Darwin":
format_ = "avfoundation"
input_ = ":0"
elif system == "Windows":
format_ = "dshow"
input_ = "default"
ffmpeg_command = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
for item in iterator:
yield item
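# Usage sketch (assumed parameters; captures 2-second f32le chunks from the
# default microphone until interrupted):
# for raw_bytes in ffmpeg_microphone(16_000, chunk_length_s=2.0):
#     handle(raw_bytes)  # `handle` is a placeholder for downstream processing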
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s=None , stride_length_s=None , format_for_conversion="f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
chunk_s = stream_chunk_s
else:
chunk_s = chunk_length_s
microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
if format_for_conversion == "s16le":
dtype = np.int16
size_of_sample = 2
elif format_for_conversion == "f32le":
dtype = np.float32
size_of_sample = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
stride_length_s = chunk_length_s / 6
chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(stride_length_s , (int, float) ):
stride_length_s = [stride_length_s, stride_length_s]
stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
audio_time = datetime.datetime.now()
delta = datetime.timedelta(seconds=chunk_s )
for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
# Put everything back in numpy scale
item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
item["stride"] = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
item["sampling_rate"] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream=False ):
"""simple docstring"""
acc = b""
stride_left, stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_stride_left = 0
for raw in iterator:
acc += raw
if stream and len(acc ) < chunk_len:
stride = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(acc ) >= chunk_len:
# We are flushing the accumulator
stride = (_stride_left, stride_right)
item = {"raw": acc[:chunk_len], "stride": stride}
if stream:
item["partial"] = False
yield item
_stride_left = stride_left
acc = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(acc ) > stride_left:
item = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
item["partial"] = False
yield item
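# Worked example of the chunking arithmetic above (assumed numbers):
# chunk_len=10, stride=(2, 2), 26 bytes of input
# -> chunk 1: bytes 0-9, stride (0, 2)   (the first chunk has no left stride)
# -> acc advances by chunk_len - stride_left - stride_right = 6 bytes per chunk
# -> chunk 2: bytes 6-15, stride (2, 2); chunk 3: bytes 12-21, stride (2, 2)
# -> final partial chunk: bytes 18-25 (8 > stride_left), yielded with stride (2, 0)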
def _ffmpeg_stream( ffmpeg_command , buflen ):
"""simple docstring"""
bufsize = 2**24 # 16Mo
try:
with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
while True:
raw = ffmpeg_process.stdout.read(buflen )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 715 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(PretrainedConfig ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_real_features: int = 0 , num_static_categorical_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
# time series specific configuration
self.prediction_length = prediction_length
self.context_length = context_length or prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(cardinality ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
self.cardinality = cardinality
else:
self.cardinality = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
# Transformer architecture configuration
self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
# Informer
self.attention_type = attention_type
self.sampling_factor = sampling_factor
self.distil = distil
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
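# Illustrative instantiation (toy values; this configuration class ships as
# `InformerConfig`, and the defaults above cover everything not passed here):
# config = InformerConfig(prediction_length=24, context_length=48)
# config.attention_type -> "prob"; config.lags_sequence -> [1, 2, 3, 4, 5, 6, 7]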
| 81 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[str, List[str], None] = None , ignore_files: Union[List[str], None] = None , only_modules: bool = True , ):
files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
if identifier is not None:
files = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(n_identifier , list ):
for n_ in n_identifier:
files = [file for file in files if n_ not in file]
else:
files = [file for file in files if n_identifier not in file]
ignore_files = ignore_files or []
ignore_files.append("__init__.py" )
files = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , file )
if only_modules:
module_identifier = file.split("." )[0]
try:
module_identifier = getattr(transformers , module_identifier )
suite = doctest.DocTestSuite(module_identifier )
result = unittest.TextTestRunner().run(suite )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
result = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def snake_case ( self : int ):
directory = Path("src/transformers" )
identifier = "modeling"
ignore_files = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
def snake_case ( self : Any ):
directory = Path("src/transformers" )
identifier = "tokenization"
self.analyze_directory(directory , identifier=identifier )
def snake_case ( self : Dict ):
directory = Path("src/transformers" )
identifier = "configuration"
self.analyze_directory(directory , identifier=identifier )
def snake_case ( self : List[Any] ):
directory = Path("src/transformers" )
n_identifiers = ["configuration", "modeling", "tokenization"]
self.analyze_directory(directory , n_identifier=n_identifiers )
def snake_case ( self : Optional[Any] ):
directory = Path("docs/source" )
ignore_files = ["favicon.ico"]
self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 716 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path , output_loading_info=True )
else:
prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path , output_loading_info=True )
special_keys = ["key_proj", "value_proj", "query_proj"]
mapping = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
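# Illustrative reading of the mapping above: a missing key containing
# "feed_forward.intermediate" resolves on the old model through "" (the
# "feed_forward" level is consumed) and "fc1", i.e. a new
# ...feed_forward.intermediate.weight lines up with an old ...fc1.weight.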
for key in loading_info["missing_keys"]:
attributes = key.split("." )
if attributes[0] == "lm_head":
model = prophet
old_model = prophet_old
else:
model = prophet.prophetnet
old_model = prophet_old.model
is_key_init = False
for attribute in attributes:
if attribute in mapping:
old_attribute = mapping[attribute]
if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
old_attribute = attribute
elif hasattr(old_model , attribute ):
old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
model.weight = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
model.bias = old_model.bias
logger.info(F"""{attribute} is initialized""" )
is_key_init = True
break
elif attribute in special_keys and hasattr(old_model , "in_proj_weight" ):
embed_dim = old_model.in_proj_weight.shape[0] // 3
param = getattr(model , attribute )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
is_key_init = True
break
if attribute.isdigit():
model = model[int(attribute )]
old_model = old_model[int(attribute )]
else:
model = getattr(model , attribute )
if old_attribute == "":
old_model = old_model
else:
if not hasattr(old_model , old_attribute ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key( state_dict , old , new ):
"""simple docstring"""
val = state_dict.pop(old )
state_dict[new] = val
def rename_backbone_keys( state_dict ):
"""simple docstring"""
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
new_state_dict[new_key] = value
else:
new_state_dict[key] = value
return new_state_dict
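# Example of the backbone rename above (hypothetical key):
# "backbone.0.body.layer1.0.conv1.weight" -> "backbone.conv_encoder.model.layer1.0.conv1.weight"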
def read_in_q_k_v( state_dict ):
"""simple docstring"""
prefix = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
state_dict[F"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
state_dict[F"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
state_dict[F"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
state_dict[F"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
state_dict[F"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
in_proj_weight_cross_attn = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
in_proj_bias_cross_attn = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
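# Note: the hidden size is 256 here, so each fused in_proj matrix stacks the
# query/key/value projections into a (3 * 256, 256) tensor; the slices [:256],
# [256:512] and [-256:] peel off q, k and v respectively.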
def resize( image , checkpoint_url ):
"""simple docstring"""
width, height = image.size
current_max_size = max(width , height )
target_max_size = 800 if "detection" in checkpoint_url else 1_000
scale = target_max_size / current_max_size
resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
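# Worked example of the rescaling above (assumed input size): a 1200x900 page
# with a detection checkpoint gives scale = 800 / 1200, so the image is resized
# to 800x600 (the longer side is capped at 800).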
def normalize( image ):
"""simple docstring"""
image = F.to_tensor(image )
image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
"""simple docstring"""
logger.info("Converting model..." )
# load original state dict
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
state_dict = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
read_in_q_k_v(state_dict )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
prefix = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
val = state_dict.pop(key )
state_dict[prefix + key] = val
# create HuggingFace model and load state dict
config = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
config.num_queries = 15
config.num_labels = 2
id2label = {0: "table", 1: "table rotated"}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
else:
config.num_queries = 125
config.num_labels = 6
id2label = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
image_processor = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
model = TableTransformerForObjectDetection(config )
model.load_state_dict(state_dict )
model.eval()
# verify our conversion
filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
file_path = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=filename )
image = Image.open(file_path ).convert("RGB" )
pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
outputs = model(pixel_values )
if "detection" in checkpoint_url:
expected_shape = (1, 15, 3)
expected_logits = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
expected_shape = (1, 125, 7)
expected_logits = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
model_name = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(model_name )
image_processor.push_to_hub(model_name )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPT2Tokenizer
lowercase_ = GPT2TokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
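# Illustrative BPE trace with the toy vocab/merges above: " lower" starts as
# "\u0120 l o w e r"; applying the merges in order ("\u0120 l", "\u0120l o",
# "\u0120lo w", "e r") yields the tokens ["\u0120low", "er"].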
def get_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_rust_tokenizer( self , **kwargs ):
kwargs.update(self.special_tokens_map )
return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = "lower newer"
bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
tokens = tokenizer.tokenize(text , add_prefix_space=True )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
sequence = "lower newer"
# Testing tokenization
tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
# Testing conversion to ids without special tokens
ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
# Testing conversion to ids with special tokens
rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
ids = tokenizer.encode(sequence , add_prefix_space=True )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
# Testing the unknown token
input_tokens = tokens + [rust_tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , max_length=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
# Simple input
s = "This is a simple input"
sa = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
pa = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
# Simple input
self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
# Simple input
self.assertRaises(
ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding="max_length" , )
# Pair input
self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
# Pair input
self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
# Pair input
self.assertRaises(
ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding="max_length" , )
def snake_case ( self : Any ):
tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
s = "This is a simple input"
sa = ["This is a simple input looooooooong", "This is a simple input"]
p = ("This is a simple input", "This is a pair")
pa = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
pad_token_id = tokenizer.pad_token_id
out_s = tokenizer(s , padding="max_length" , max_length=30 , return_tensors="np" )
out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors="np" )
out_p = tokenizer(*p , padding="max_length" , max_length=60 , return_tensors="np" )
out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
bos_token = "$$$"
tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
s = "This is a simple input"
sa = ["This is a simple input 1", "This is a simple input 2"]
bos_token_id = tokenizer.bos_token_id
out_s = tokenizer(s )
out_sa = tokenizer(sa )
self.assertEqual(out_s.input_ids[0] , bos_token_id )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
decode_s = tokenizer.decode(out_s.input_ids )
decode_sa = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , bos_token )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = StableDiffusionInstructPixaPixPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : Optional[int] ):
torch.manual_seed(0 )
lowercase__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
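        # in_channels=8: InstructPix2Pix concatenates the 4 noisy latent channels with the 4
        # encoded-image latent channels along the channel dimension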
lowercase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
lowercase__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ : Optional[int] = CLIPTextModel(snake_case__ )
lowercase__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=0 ):
lowercase__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Any = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" )
if str(snake_case__ ).startswith("mps" ):
lowercase__ : str = torch.manual_seed(snake_case__ )
else:
lowercase__ : Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase__ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Union[str, Any] ):
lowercase__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : int = self.get_dummy_components()
lowercase__ : int = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : int = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any = self.get_dummy_inputs(snake_case__ )
lowercase__ : int = sd_pipe(**snake_case__ ).images
lowercase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : int = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : List[Any] ):
lowercase__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : int = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : Union[str, Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : List[Any] = self.get_dummy_inputs(snake_case__ )
lowercase__ : Tuple = "french fries"
lowercase__ : Dict = sd_pipe(**snake_case__ , negative_prompt=snake_case__ )
lowercase__ : Tuple = output.images
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : str = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : List[str] ):
lowercase__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : Tuple = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Dict = self.get_dummy_inputs(snake_case__ )
lowercase__ : List[Any] = [inputs["prompt"]] * 2
lowercase__ : Optional[int] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
lowercase__ : int = torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ )
lowercase__ : Union[str, Any] = image / 2 + 0.5
lowercase__ : List[str] = image.permute(0 , 3 , 1 , 2 )
lowercase__ : Dict = image.repeat(2 , 1 , 1 , 1 )
lowercase__ : List[Any] = sd_pipe(**snake_case__ ).images
lowercase__ : List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ : Optional[int] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : Tuple ):
lowercase__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : str = self.get_dummy_components()
lowercase__ : List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
lowercase__ : str = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : List[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Optional[int] = self.get_dummy_inputs(snake_case__ )
lowercase__ : Any = sd_pipe(**snake_case__ ).images
lowercase__ : Dict = image[0, -3:, -3:, -1]
        lowercase__ : List[Any] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in lowercase__] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ : Optional[int] = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
lowercase__ : List[Any] = VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ )
lowercase__ : List[str] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Dict = pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) )[0]
lowercase__ : Union[str, Any] = components["vae"]
lowercase__ : List[str] = self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
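                # swap the image input for its VAE latents; the pipeline should produce the
                # same output either way (verified by the assertLess below)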
lowercase__ : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ : str = pipe(**snake_case__ )[0]
lowercase__ : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case__ , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any=0 ):
lowercase__ : Any = torch.manual_seed(snake_case__ )
lowercase__ : str = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
lowercase__ : str = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Tuple ):
lowercase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : List[Any] = self.get_inputs()
lowercase__ : Tuple = pipe(**snake_case__ ).images
lowercase__ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : Optional[Any] ):
lowercase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
lowercase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : Dict = self.get_inputs()
lowercase__ : Any = pipe(**snake_case__ ).images
lowercase__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any] = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : Optional[Any] ):
lowercase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
lowercase__ : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : int = self.get_inputs()
lowercase__ : Tuple = pipe(**snake_case__ ).images
lowercase__ : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : str = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : Any ):
lowercase__ : Union[str, Any] = 0
def callback_fn(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> None:
lowercase__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : Tuple = latents[0, -3:, -3:, -1]
lowercase__ : Union[str, Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase__ : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : str = latents[0, -3:, -3:, -1]
lowercase__ : Dict = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase__ : Dict = False
lowercase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa )
lowercase__ : Dict = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : List[str] = self.get_inputs()
pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case ( self : Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa )
lowercase__ : Any = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
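        # attention slicing and sequential CPU offload trade speed for memory: each submodule is
        # moved to the GPU only while it runs, keeping peak usage under the 2.2 GB bound below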
lowercase__ : List[str] = self.get_inputs()
lowercase__ : str = pipe(**snake_case__ )
lowercase__ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case ( self : Optional[int] ):
lowercase__ : List[Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
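        # (504 = 8 * 63, while 504 / 16 = 31.5 and 504 / 32 = 15.75)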
lowercase__ : List[str] = inputs["image"].resize((504, 504) )
lowercase__ : Tuple = "timbrooks/instruct-pix2pix"
lowercase__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowercase__ : Tuple = pipe(**snake_case__ )
lowercase__ : Optional[int] = output.images[0]
lowercase__ : Dict = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ : int = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
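        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225, seq_length = ceil(0.4 * 226) = 91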
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
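        # ViTMAE derives its random mask by sorting this per-patch noise (lowest-noise patches
        # are kept), so a fixed noise array makes the masking deterministic across calls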
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 0 |
from __future__ import annotations
class snake_case__:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] = 0 ):
lowercase__ : Tuple = key
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
lowercase__ : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(snake_case_ ) ^ key ) for ch in content]
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
lowercase__ : Tuple = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(snake_case_ ) ^ key ) for ch in content]
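    # XOR is self-inverse ((ch ^ key) ^ key == ch), which is why the encrypt and decrypt
    # methods above perform the same operation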
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str = 0 ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
lowercase__ : Optional[int] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowercase__ : Optional[Any] = ""
for ch in content:
ans += chr(ord(snake_case_ ) ^ key )
return ans
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any = 0 ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
lowercase__ : int = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
lowercase__ : List[str] = ""
for ch in content:
ans += chr(ord(snake_case_ ) ^ key )
return ans
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] = 0 ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
try:
with open(snake_case_ ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(snake_case_ , snake_case_ ) )
except OSError:
return False
return True
def snake_case ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] ):
assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
try:
with open(snake_case_ ) as fin, open("decrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(snake_case_ , snake_case_ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 720 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
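        # with the defaults above: 1024 // 32 = 32 sequence heads and 128 // 32 = 4 pairwise heads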
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 81 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class snake_case__:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Tuple , ):
lowercase__ : List[str] = parent
lowercase__ : Union[str, Any] = 13
lowercase__ : Union[str, Any] = 7
lowercase__ : Any = 30
lowercase__ : Optional[int] = self.seq_length + self.mem_len
lowercase__ : Dict = 15
lowercase__ : Union[str, Any] = True
lowercase__ : List[str] = True
lowercase__ : Union[str, Any] = 99
lowercase__ : Optional[Any] = [10, 50, 80]
lowercase__ : Union[str, Any] = 32
lowercase__ : Any = 32
lowercase__ : int = 4
lowercase__ : Dict = 8
lowercase__ : Any = 128
lowercase__ : Dict = 2
lowercase__ : str = 2
lowercase__ : str = None
lowercase__ : Tuple = 1
lowercase__ : Optional[int] = 0
lowercase__ : int = 3
lowercase__ : Any = self.vocab_size - 1
lowercase__ : Dict = 0.01
def snake_case ( self : Optional[int] ):
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Any = None
if self.use_labels:
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self : List[Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = TFTransfoXLModel(UpperCAmelCase__ )
lowercase__ : Optional[Any] = model(UpperCAmelCase__ ).to_tuple()
lowercase__ : Dict = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase__ : Optional[Any] = model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
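        # Transfo-XL caches the last `mem_len` hidden states of every layer as "mems", hence
        # the (mem_len, batch_size, hidden_size) shape checked above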
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[Any] = TFTransfoXLLMHeadModel(UpperCAmelCase__ )
lowercase__ : Union[str, Any] = model(UpperCAmelCase__ ).to_tuple()
lowercase__ : List[str] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase__ : Optional[int] = model(UpperCAmelCase__ ).to_tuple()
lowercase__ : Dict = model([input_ids_a, mems_a] ).to_tuple()
lowercase__ : Optional[int] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase__ : Optional[Any] = model(UpperCAmelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = TFTransfoXLForSequenceClassification(UpperCAmelCase__ )
lowercase__ : Union[str, Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : str ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
(lowercase__) : int = config_and_inputs
lowercase__ : Any = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class snake_case__(lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase_ = () if is_tf_available() else ()
lowercase_ = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self : List[Any] ):
lowercase__ : Any = TFTransfoXLModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : int ):
self.model_tester.set_seed()
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ )
def snake_case ( self : Optional[Any] ):
self.model_tester.set_seed()
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ )
    def snake_case ( self : Optional[int] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def snake_case ( self : Union[str, Any] ):
pass
@slow
def snake_case ( self : int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = TFTransfoXLModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="This model doesn\'t play well with fit() due to not returning a single loss." )
def snake_case ( self : List[Any] ):
pass
@require_tf
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def snake_case ( self : int ):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.int32 ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
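A minimal usage sketch (assuming this class is the DeformableDetrConfig exposed by transformers, which the snippet mirrors): the attribute_map above routes the generic config names onto the DETR-specific fields.

from transformers import DeformableDetrConfig

config = DeformableDetrConfig()
# "hidden_size" resolves to "d_model", "num_attention_heads" to "encoder_attention_heads"
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8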
| 81 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
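An illustrative programmatic call of consolidate() (the model identifiers below are examples, not mandated by the script): merge a DPR question encoder and a BART generator into one RAG checkpoint.

from pathlib import Path

dest = Path("./rag-sequence-checkpoint")  # hypothetical output directory
dest.mkdir(exist_ok=True)
consolidate(
    "rag_sequence",
    "facebook/bart-large-cnn",  # generator_name_or_path
    "facebook/dpr-question_encoder-single-nq-base",  # question_encoder_name_or_path
    dest,
)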
| 700 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def snake_case ( self : Dict , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
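The padding arithmetic of the pad() method above, on concrete numbers (an illustrative sketch): a 13x10 image with pad_size=8 gets 3 rows and 6 columns of symmetric padding. Note that a dimension already divisible by the pad size still gains one full extra block.

old_height, old_width, size = 13, 10, 8
pad_height = (old_height // size + 1) * size - old_height  # 3 -> padded height 16
pad_width = (old_width // size + 1) * size - old_width     # 6 -> padded width 16
assert (old_height + pad_height) % size == 0
assert (old_width + pad_width) % size == 0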
| 81 | 0 |
def solution(numerator = 3 , denominator = 7 , limit = 1_000_000 ):
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
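A quick sanity check with a small bound (illustrative, not part of the original script): among fractions n/d < 3/7 with d <= 8, the closest from below is 2/5, so the returned numerator is 2.

assert solution(numerator=3, denominator=7, limit=8) == 2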
| 701 |
import argparse
import json
from tqdm import tqdm
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )
if __name__ == "__main__":
main()
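For reference, each record in the DPR biencoder JSON is shaped roughly as below (field names taken from the code above; the values are invented). The script emits one line per record: the question into the evaluation set, and the tab-joined positive context titles into the gold data file.

example_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson", "text": "..."}],
}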
| 81 | 0 |
import argparse
import datetime
def zeller(date_input ):
    """simple docstring"""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("Must be 10 characters long" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer." )
    # Response
    response = F"""Your date {date_input}, is a {days[str(f )]}!"""
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
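An example call (the computed weekday is checked internally against datetime, so a mismatch would raise): 31 January 2010 was a Sunday.

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!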
| 702 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1_000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
        print(F"""Total {args.split} records: {total_records}""" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
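A sketch of reading one shard back (the feature spec mirrors the serialization code above; the file path and the max_length of 512 are illustrative assumptions):

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.FixedLenFeature([512], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
}

def decode(record_bytes):
    # parse one serialized tf.train.Example back into dense tensors
    return tf.io.parse_single_example(record_bytes, feature_spec)

ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord").map(decode)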
| 81 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=32 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : List[Any]=10 , SCREAMING_SNAKE_CASE : int=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : List[str]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Union[str, Any]="relu" , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : List[str]=None , ):
lowercase__ : List[Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Tuple = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embeddings_size
lowercase__ : Dict = hidden_sizes
lowercase__ : Union[str, Any] = depths
lowercase__ : Any = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[Any] = num_labels
lowercase__ : Optional[int] = scope
lowercase__ : Union[str, Any] = len(lowercase_ )
def snake_case ( self : Tuple ):
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : List[str] = self.get_config()
return config, pixel_values
def snake_case ( self : int ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = FlaxRegNetModel(config=lowercase_ )
lowercase__ : Dict = model(lowercase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Union[str, Any] = self.num_labels
lowercase__ : Dict = FlaxRegNetForImageClassification(config=lowercase_ )
lowercase__ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class snake_case__(snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def snake_case ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : Dict ):
return
def snake_case ( self : Optional[Any] ):
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case ( self : Dict ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def snake_case ( self : List[str] ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def snake_case ( self : Optional[Any] ):
pass
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowercase_ )
lowercase__ : str = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def snake_case ( self : Optional[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Tuple = model_class(lowercase_ )
lowercase__ : Optional[int] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def snake_case ( self : int ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : str = model_class(lowercase_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
return model(pixel_values=lowercase_ , **lowercase_ )
with self.subTest("JIT Enabled" ):
lowercase__ : Optional[Any] = model_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowercase__ : Union[str, Any] = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_flax
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def snake_case ( self : Tuple ):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="np" )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
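The JIT-consistency pattern used in the test class above, reduced to a standalone sketch: the jitted and eager outputs of the same function should agree.

import jax
import jax.numpy as jnp

def f(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((2, 3))
jitted = jax.jit(f)(x)      # compiled execution
with jax.disable_jit():
    eager = f(x)            # eager execution
assert jnp.allclose(jitted, eager)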
| 703 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 81 | 0 |
'''simple docstring'''
def partition(m ):
    """simple docstring"""
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
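A quick illustrative check of the DP above: the integer 5 has seven partitions (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).

assert partition(5) == 7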
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 81 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(__lowercase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : Dict=PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
lowercase__ : Optional[Any] = do_resize
lowercase__ : str = do_rescale
lowercase__ : List[str] = size_divisor
lowercase__ : str = resample
super().__init__(**SCREAMING_SNAKE_CASE )
    def snake_case ( self : List[str] , image : np.ndarray , size_divisor : int , resample : str , data_format : Optional[ChannelDimension] = None , **kwargs : List[str] ):
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[Union[TensorType, str]] = None , SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
lowercase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Dict = size_divisor if size_divisor is not None else self.size_divisor
lowercase__ : List[str] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
lowercase__ : int = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
lowercase__ : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE ) for img in images]
if do_resize:
lowercase__ : Optional[int] = [self.resize(SCREAMING_SNAKE_CASE , size_divisor=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : List[str] = [self.rescale(SCREAMING_SNAKE_CASE , scale=1 / 255 ) for image in images]
lowercase__ : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
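The size_divisor rounding in the resize() method above, on concrete numbers (an illustrative sketch): with size_divisor=32, a 480x645 image is shrunk to 480x640.

height, width, size_divisor = 480, 645, 32
new_h = height // size_divisor * size_divisor  # 480 (already a multiple)
new_w = width // size_divisor * size_divisor   # 640
assert new_h % size_divisor == 0 and new_w % size_divisor == 0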
| 705 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(k ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
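        # (illustrative) the fused qkv weight has shape (3 * hidden_size, hidden_size): rows [0:h]
        # hold the query projection, [h:2h] the key and [-h:] the value; e.g. (1152, 384) for
        # yolos_s with hidden_size=384. The slices below pull the three projections apart.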
lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
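# Illustrative trace of rename_key, assuming the timm-style checkpoint layout:
#   'backbone.blocks.0.attn.proj.weight'
#   -> 'vit.blocks.0.attn.proj.weight'                       ("backbone" -> "vit")
#   -> 'vit.encoder.layer.0.attn.proj.weight'                ("blocks" -> "encoder.layer")
#   -> 'vit.encoder.layer.0.attention.output.dense.weight'   ("attn.proj" -> "attention.output.dense")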
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Dict = key.split("." )
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : int = val[
dim : dim * 2, :
]
lowercase__ : str = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : Optional[Any] = val[-dim:]
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ : int = None, None
if yolos_name == "yolos_ti":
lowercase__ : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase__ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase__ : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase__ : int = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase__ : List[str] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase__ : List[str] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowercase__ : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
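# Mapping values containing '*' are filled in per encoder layer when loading, e.g. the fairseq key
# 'encoder.layers.3.self_attn.linear_q.weight' resolves (illustratively) to
# 'wav2vec2_conformer.encoder.layers.3.self_attn.linear_q' with weight_type 'weight'.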
lowerCAmelCase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for attribute in key.split("." ):
        lowercase__ : Optional[Any] = getattr(hf_pointer , attribute )
if weight_type is not None:
        lowercase__ : List[Any] = getattr(hf_pointer , weight_type ).shape
else:
lowercase__ : Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : List[Any] = value
elif weight_type == "weight_g":
lowercase__ : int = value
elif weight_type == "weight_v":
lowercase__ : Dict = value
elif weight_type == "bias":
lowercase__ : Any = value
elif weight_type == "running_mean":
lowercase__ : List[Any] = value
elif weight_type == "running_var":
lowercase__ : List[str] = value
elif weight_type == "num_batches_tracked":
lowercase__ : List[str] = value
elif weight_type == "inv_freq":
lowercase__ : Optional[int] = value
else:
lowercase__ : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = []
lowercase__ : str = fairseq_model.state_dict()
lowercase__ : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
lowercase__ : Any = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : Tuple = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ : Optional[int] = True
if "*" in mapped_key:
                        lowercase__ : Optional[int] = name.split(key )[0].split("." )[-2]
                        lowercase__ : List[Any] = mapped_key.replace("*" , layer_index )
if "pos_bias_u" in name:
lowercase__ : Tuple = None
elif "pos_bias_v" in name:
lowercase__ : int = None
elif "weight_g" in name:
lowercase__ : Optional[Any] = "weight_g"
elif "weight_v" in name:
lowercase__ : Union[str, Any] = "weight_v"
elif "bias" in name:
lowercase__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = "weight"
elif "running_mean" in name:
lowercase__ : List[Any] = "running_mean"
elif "inv_freq" in name:
lowercase__ : Optional[int] = "inv_freq"
elif "running_var" in name:
lowercase__ : List[Any] = "running_var"
elif "num_batches_tracked" in name:
lowercase__ : Dict = "num_batches_tracked"
else:
lowercase__ : Dict = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = full_name.split("conv_layers." )[-1]
lowercase__ : Optional[Any] = name.split("." )
lowercase__ : Optional[Any] = int(items[0] )
lowercase__ : int = int(items[1] )
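    # e.g. full_name 'feature_extractor.conv_layers.2.0.weight' yields items ['2', '0', 'weight'],
    # so layer_id=2 and type_id=0; type_id 0 selects the conv branch below, type_id 2 the layer norm branch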
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
"""simple docstring"""
if config_path is not None:
lowercase__ : List[Any] = WavaVecaConformerConfig.from_pretrained(_SCREAMING_SNAKE_CASE , hidden_act="swish" )
else:
lowercase__ : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase__ : Optional[Any] = "rotary"
if is_finetuned:
if dict_path:
lowercase__ : Optional[Any] = Dictionary.load(_SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
lowercase__ : Tuple = target_dict.pad_index
lowercase__ : Dict = target_dict.bos_index
lowercase__ : Any = target_dict.eos_index
lowercase__ : Any = len(target_dict.symbols )
lowercase__ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , "vocab.json" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowercase__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : Optional[Any] = 0
lowercase__ : List[Any] = 1
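            # i.e. the serialized vocab maps '<pad>' -> 0 and '<s>' -> 1 before being written to vocab.json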
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_SCREAMING_SNAKE_CASE , )
lowercase__ : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
lowercase__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
lowercase__ : Dict = WavaVecaConformerForCTC(_SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = WavaVecaConformerForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ : List[Any] = argparse.Namespace(task="audio_pretraining" )
lowercase__ : Tuple = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ , lowercase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
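# Standard lazy-import pattern: the torch-backed classes above are only recorded in the import
# structure here; outside of TYPE_CHECKING the module is served by a _LazyModule (see bottom of
# file), so they are imported on first attribute access rather than at package import time.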
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = model.config
lowercase__ : List[Any] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ : List[Any] = MBartConfig(
is_decoder=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , add_cross_attention=lowerCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowerCamelCase__ , add_final_layer_norm=lowerCamelCase__ , )
return encoder_config, decoder_config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "encoder.model" in name:
lowercase__ : int = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
lowercase__ : Dict = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
lowercase__ : Any = """encoder.""" + name
if "attn.proj" in name:
lowercase__ : Any = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
lowercase__ : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Dict = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
lowercase__ : str = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase__ : Dict = """encoder.layernorm.bias"""
return name
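# Illustrative trace of rename_key for a Swin block parameter:
#   'encoder.model.layers.0.blocks.1.mlp.fc1.weight'
#   -> 'encoder.layers.0.blocks.1.mlp.fc1.weight'                     ("encoder.model" -> "encoder")
#   -> 'encoder.encoder.layers.0.blocks.1.mlp.fc1.weight'             ("encoder." prefix added)
#   -> 'encoder.encoder.layers.0.blocks.1.intermediate.dense.weight'  ("mlp.fc1" -> "intermediate.dense")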
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : int = key.split("." )
lowercase__ : List[str] = int(key_split[3] )
lowercase__ : Optional[Any] = int(key_split[5] )
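            # e.g. a key like 'encoder.model.layers.0.blocks.1.attn.qkv.weight' gives
            # layer_num=0 (index 3) and block_num=1 (index 5)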
lowercase__ : str = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ : Union[str, Any] = val[:dim, :]
lowercase__ : Optional[int] = val[dim : dim * 2, :]
lowercase__ : Union[str, Any] = val[-dim:, :]
else:
lowercase__ : List[Any] = val[:dim]
lowercase__ : Optional[Any] = val[dim : dim * 2]
lowercase__ : Dict = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ : List[str] = val
return orig_state_dict
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
"""simple docstring"""
lowercase__ : Dict = DonutModel.from_pretrained(lowerCamelCase__ ).eval()
# load HuggingFace model
lowercase__ : List[str] = get_configs(lowerCamelCase__ )
lowercase__ : Union[str, Any] = DonutSwinModel(lowerCamelCase__ )
lowercase__ : Union[str, Any] = MBartForCausalLM(lowerCamelCase__ )
lowercase__ : Any = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
lowercase__ : List[Any] = original_model.state_dict()
lowercase__ : List[Any] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# verify results on scanned document
lowercase__ : str = load_dataset("hf-internal-testing/example-documents" )
lowercase__ : List[Any] = dataset["""test"""][0]["""image"""].convert("RGB" )
lowercase__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase__ , from_slow=lowerCamelCase__ )
lowercase__ : Tuple = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ : Optional[int] = DonutProcessor(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Tuple = processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase__ : Union[str, Any] = """When is the coffee break?"""
lowercase__ : List[str] = task_prompt.replace("{user_input}" , lowerCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ : str = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ : str = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase__ : Any = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ : List[str] = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ : int = """hello world"""
else:
raise ValueError("Model name not supported" )
lowercase__ : List[Any] = original_model.decoder.tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" )[
"""input_ids"""
]
lowercase__ : Tuple = original_model.encoder.model.patch_embed(lowerCamelCase__ )
lowercase__ : str = model.encoder.embeddings(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
# verify encoder hidden states
lowercase__ : Any = original_model.encoder(lowerCamelCase__ )
lowercase__ : Optional[int] = model.encoder(lowerCamelCase__ ).last_hidden_state
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-2 )
# verify decoder hidden states
lowercase__ : int = original_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).logits
lowercase__ : Dict = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = None
lowercase_ = None
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = True
lowercase_ = None
lowercase_ = 1
lowercase_ = None
lowercase_ = False
lowercase_ = None
lowercase_ = None
def snake_case ( self : Dict ):
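        # returns a new instance of the same class with every field deep-copied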
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 708 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
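        # generation runs in a background thread so this (main) thread can consume decoded text
        # chunks from the streamer as soon as they are produced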
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 81 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class snake_case__(a__ ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase_ , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase_ , "num_attention_heads" ) )
class snake_case__:
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=13 , SCREAMING_SNAKE_CASE : str=32 , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=640 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Any="silu" , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : List[Any]=32 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : str=10 , SCREAMING_SNAKE_CASE : Union[str, Any]=None , ):
lowercase__ : List[str] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : Union[str, Any] = patch_size
lowercase__ : Dict = num_channels
lowercase__ : Any = last_hidden_size
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = conv_kernel_size
lowercase__ : Dict = output_stride
lowercase__ : str = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = classifier_dropout_prob
lowercase__ : List[Any] = use_labels
lowercase__ : List[Any] = is_training
lowercase__ : int = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = scope
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Optional[Any] = MobileViTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = self.num_labels
lowercase__ : Optional[Any] = MobileViTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Dict = self.num_labels
lowercase__ : str = MobileViTForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowercase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase__ : Dict = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case ( self : List[str] ):
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(a__ , a__ , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
lowercase__ : str = MobileViTModelTester(self )
lowercase__ : int = MobileViTConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def snake_case ( self : List[str] ):
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def snake_case ( self : Tuple ):
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowerCamelCase_ )
lowercase__ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict, so the arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Dict ):
pass
def snake_case ( self : List[Any] ):
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def snake_case ( self : Optional[int] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ : Tuple = outputs.hidden_states
lowercase__ : Optional[Any] = 5
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
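            # e.g. with the tester's image_size=32 that is 16x16, 8x8, 4x4, 2x2 and 1x1 across the five stages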
lowercase__ : List[str] = 2
for i in range(len(lowerCamelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def snake_case ( self : Tuple ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def snake_case ( self : str ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
@slow
def snake_case ( self : str ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = MobileViTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Union[str, Any] ):
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowercase__ : int = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase_ )
lowercase__ : str = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**lowerCamelCase_ )
# verify the logits
lowercase__ : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowercase__ : Any = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def snake_case ( self : List[str] ):
lowercase__ : int = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
lowercase__ : str = model.to(lowerCamelCase_ )
lowercase__ : Optional[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
lowercase__ : List[Any] = prepare_img()
lowercase__ : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**lowerCamelCase_ )
lowercase__ : List[str] = outputs.logits
# verify the logits
lowercase__ : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowercase__ : Optional[int] = torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=lowerCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def snake_case ( self : List[Any] ):
lowercase__ : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
lowercase__ : Any = model.to(lowerCamelCase_ )
lowercase__ : List[Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
lowercase__ : Any = prepare_img()
lowercase__ : int = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(**lowerCamelCase_ )
lowercase__ : List[Any] = outputs.logits.detach().cpu()
lowercase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(50, 60)] )
lowercase__ : Union[str, Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
lowercase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowercase__ : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
| 709 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(module : Union[str, Any] ):
def custom_forward(*inputs : Dict ):
return module(*inputs )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
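# Aside (hedged usage sketch, not in the original file): under the obfuscation this class
# mirrors diffusers' `Encoder`, with `Convad` standing in for `nn.Conv2d`. Assuming the
# de-obfuscated constructor names, it would be driven roughly like this:
#
#   enc = Encoder(in_channels=3, out_channels=4,
#                 down_block_types=("DownEncoderBlock2D",),
#                 block_out_channels=(64,), layers_per_block=2, double_z=True)
#   moments = enc(torch.randn(1, 3, 64, 64))   # 2 * out_channels = 8 channels (mean + logvar)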
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(module : List[str] ):
def custom_forward(*inputs : Optional[int] ):
return module(*inputs )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
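# Aside: the line above is the straight-through estimator — the forward pass uses the
# quantized values, while the backward pass copies gradients to z as if quantization
# were the identity. Self-contained toy illustration (hypothetical, not part of this module):
#
#   t = torch.randn(4, requires_grad=True)
#   t_st = t + (t.round() - t).detach()   # forward: t.round(); backward: d(t_st)/dt == 1
#   t_st.sum().backward()                 # t.grad is all ones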
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
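# Aside (hedged round-trip sketch, assuming this mirrors diffusers' `VectorQuantizer`
# and using de-obfuscated names; shapes follow the forward/get_codebook_entry contract above):
#
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25)
#   z_q, loss, (_, _, indices) = vq(torch.randn(1, 4, 8, 8))
#   z_rec = vq.get_codebook_entry(indices.view(-1), shape=(1, 8, 8, 4))  # same values as z_q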
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
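# Aside: this is the reparameterization trick — drawing eps ~ N(0, I) via `randn_tensor`
# and returning mean + std * eps keeps the sample differentiable w.r.t. the predicted
# mean and (log-)variance, which is what makes the VAE's KL term trainable.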
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
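# Aside: both branches above are the closed-form KL between diagonal Gaussians; against
# the standard normal it reduces to
#   KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
# which is exactly the first branch with var = exp(logvar).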
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = '''src/transformers'''
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(r'''\s+\"\S*\":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile('''^\s+\"([^\"]+)\",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCAmelCase__ = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCAmelCase__ = re.compile(r'''^\s*else:''')
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if _re_test_backend.search(lowerCamelCase__ ) is None:
return None
lowercase__ : str = [b[0] for b in _re_backend.findall(lowerCamelCase__ )]
backends.sort()
return "_and_".join(lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ : Any = f.readlines()
lowercase__ : str = 0
while line_index < len(lowerCamelCase__ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__ : Union[str, Any] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowercase__ : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase__ ):
lowercase__ : str = _re_one_line_import_struct.search(lowerCamelCase__ ).groups()[0]
lowercase__ : Optional[int] = re.findall(r"\[([^\]]+)\]" , lowerCamelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowercase__ : List[Any] = _re_import_struct_key_value.search(lowerCamelCase__ )
if single_line_import_search is not None:
lowercase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCamelCase__ ) > 0]
objects.extend(lowerCamelCase__ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowercase__ : Any = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__ : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowercase__ : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase__ ) is not None:
lowercase__ : Tuple = _re_import_struct_add_many.search(lowerCamelCase__ ).groups()[0].split(", " )
lowercase__ : Dict = [obj[1:-1] for obj in imports if len(lowerCamelCase__ ) > 0]
objects.extend(lowerCamelCase__ )
elif _re_between_brackets.search(lowerCamelCase__ ) is not None:
lowercase__ : Tuple = _re_between_brackets.search(lowerCamelCase__ ).groups()[0].split(", " )
lowercase__ : Dict = [obj[1:-1] for obj in imports if len(lowerCamelCase__ ) > 0]
objects.extend(lowerCamelCase__ )
elif _re_quote_object.search(lowerCamelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase__ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowercase__ : Dict = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__ : Optional[int] = []
while (
line_index < len(lowerCamelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowercase__ : Optional[Any] = lines[line_index]
lowercase__ : Union[str, Any] = _re_import.search(lowerCamelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__ : Dict = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowercase__ : List[str] = lines[line_index]
lowercase__ : str = _re_import.search(lowerCamelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase__ : Optional[int] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
def find_duplicates(lowerCamelCase__ ):
return [k for k, v in collections.Counter(lowerCamelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase__ : int = []
for key in import_dict_objects.keys():
lowercase__ : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowercase__ : Tuple = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase__ : Tuple = "base imports" if key == "none" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
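# Aside (hypothetical example): with
#   import_dict_objects = {"none": ["AutoModel", "AutoModel"]}
#   type_hint_objects   = {"none": ["AutoModel"]}
# `analyze_results` flags the duplicated "AutoModel" in `_import_structure`; if the two
# dicts had different backend keys it would instead short-circuit with the single
# "Both sides of the init do not have the same backends!" error.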
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = []
for root, _, files in os.walk(lowerCamelCase__ ):
if "__init__.py" in files:
lowercase__ : Any = os.path.join(lowerCamelCase__ , "__init__.py" )
lowercase__ : int = parse_init(lowerCamelCase__ )
if objects is not None:
lowercase__ : List[str] = analyze_results(*lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
lowercase__ : int = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(lowerCamelCase__ ) )
if len(lowerCamelCase__ ) > 0:
raise ValueError("\n\n".join(lowerCamelCase__ ) )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = []
for path, directories, files in os.walk(lowerCamelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(lowerCamelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase__ ) / folder).glob("*.py" ) ) ) == 0:
continue
lowercase__ : Any = str((Path(lowerCamelCase__ ) / folder).relative_to(lowerCamelCase__ ) )
lowercase__ : List[str] = short_path.replace(os.path.sep , "." )
submodules.append(lowerCamelCase__ )
for fname in files:
if fname == "__init__.py":
continue
lowercase__ : Dict = str((Path(lowerCamelCase__ ) / fname).relative_to(lowerCamelCase__ ) )
lowercase__ : Dict = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(lowerCamelCase__ )
return submodules
lowerCAmelCase__ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = importlib.util.spec_from_file_location(
"transformers" , os.path.join(lowerCamelCase__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase__ : List[str] = spec.loader.load_module()
lowercase__ : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowerCamelCase__ ) > 0:
lowercase__ : Tuple = "\n".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
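# Aside: in the transformers repository this module runs as a repo-consistency check,
# e.g. `python utils/check_inits.py` (typically invoked via `make repo-consistency`),
# and raises ValueError listing every init whose two halves disagree.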
| 710 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=False , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=None )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 | 0 |
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase__ : List[Any] = grid[0]
for row_n in range(1 , len(__lowerCAmelCase ) ):
lowercase__ : Optional[Any] = grid[row_n]
lowercase__ : Dict = fill_row(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Tuple = grid[row_n]
return grid[-1][-1]
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowerCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
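# Aside (hedged usage sketch; the two functions above are the classic dynamic-programming
# minimum path sum and its row-relaxation helper, shown here under obfuscated names):
#
#   grid = [[1, 3, 1],
#           [1, 5, 1],
#           [4, 2, 1]]
#   # cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1, total cost 7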
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(ValueError , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
| 81 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase__ : Union[str, Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] = 1 , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : Dict = 0.0 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : Optional[Any] = "pil" , SCREAMING_SNAKE_CASE : Any = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , _UpperCAmelCase ):
lowercase__ : List[str] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase__ : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowercase__ : List[str] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase__ : List[str] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase__ : Optional[int] = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , eta=_UpperCAmelCase , use_clipped_model_output=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : int = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
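# Aside (hedged usage sketch, assuming this mirrors diffusers' `DDIMPipeline` and that
# `unet` and `scheduler` are prebuilt modules):
#
#   pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images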
| 712 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
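# Aside (hedged denoising-loop sketch, assuming the de-obfuscated class name
# `FlaxDDPMScheduler` and a hypothetical `apply_model` callable):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#   state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps=50)
#   key = jax.random.PRNGKey(0)
#   for t in state.timesteps:
#       noise_pred = apply_model(sample, t)
#       sample, state = scheduler.step(state, noise_pred, t, sample, key=key, return_dict=False)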
| 81 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str]=3 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Any="relu" , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=None , ):
lowercase__ : Dict = parent
lowercase__ : Tuple = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = num_channels
lowercase__ : Any = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : Optional[Any] = depths
lowercase__ : Union[str, Any] = is_training
lowercase__ : str = use_labels
lowercase__ : Tuple = hidden_act
lowercase__ : List[Any] = num_labels
lowercase__ : str = scope
lowercase__ : Tuple = len(_A )
def snake_case ( self : Dict ):
lowercase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Tuple = TFRegNetModel(config=_A )
lowercase__ : List[Any] = model(_A , training=_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[str] = self.num_labels
lowercase__ : Union[str, Any] = TFRegNetForImageClassification(_A )
lowercase__ : Optional[int] = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : int = config_and_inputs
lowercase__ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_a , _a , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowercase_ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = TFRegNetModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A )
def snake_case ( self : Dict ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def snake_case ( self : int ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def snake_case ( self : int ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def snake_case ( self : str ):
pass
def snake_case ( self : List[Any] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(_A )
lowercase__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def snake_case ( self : Any ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def snake_case ( self : Any ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Tuple = model_class(_A )
lowercase__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) , training=_A )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : List[str] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
def snake_case ( self : List[str] ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any]={} ):
lowercase__ : Dict = model(_A , return_dict=_A , **_A )
lowercase__ : Optional[Any] = model(_A , return_dict=_A , **_A ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
if isinstance(_A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_A , _A ):
recursive_check(_A , _A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_A , _A ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(_A , _A )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(_A )
lowercase__ : List[Any] = self._prepare_for_class(_A , _A )
lowercase__ : Optional[Any] = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A )
lowercase__ : Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase__ : Optional[int] = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A )
lowercase__ : Any = self._prepare_for_class(_A , _A )
lowercase__ : Dict = self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
lowercase__ : int = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase__ : Any = self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
def snake_case ( self : Optional[Any] ):
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def snake_case ( self : List[str] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = TFRegNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : str ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ):
lowercase__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : str = prepare_img()
lowercase__ : Optional[int] = image_processor(images=_A , return_tensors="tf" )
# forward pass
lowercase__ : List[Any] = model(**_A , training=_A )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
lowercase__ : Tuple = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 713 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
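# ------------------------------------------------------------------
# A minimal sketch of the mask-generation step above in isolation, kept
# as a comment so this module stays import-safe. The checkpoint name
# "CIDAS/clipseg-rd64-refined" and the image path are assumptions; any
# matching CLIPSeg processor/model pair should behave the same way.
#
#   from PIL import Image
#   import torch
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   image = Image.open("cat.png").convert("RGB")
#   inputs = processor(text=["a cat"], images=[image], padding="max_length", return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   # As in __call__ above: sigmoid turns the logits into a soft [0, 1]
#   # mask, which is then resized to the input image and handed to the
#   # StableDiffusionInpaintPipeline as mask_image.
#   mask = torch.sigmoid(logits)
# ------------------------------------------------------------------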
| 81 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
lowercase__ : Tuple = []
for old_item in old_list:
lowercase__ : Union[str, Any] = old_item.replace("in_layers.0" , "norm1" )
lowercase__ : Dict = new_item.replace("in_layers.2" , "conv1" )
lowercase__ : Union[str, Any] = new_item.replace("out_layers.0" , "norm2" )
lowercase__ : str = new_item.replace("out_layers.3" , "conv2" )
lowercase__ : List[Any] = new_item.replace("emb_layers.1" , "time_emb_proj" )
lowercase__ : int = new_item.replace("skip_connection" , "conv_shortcut" )
lowercase__ : Optional[Any] = shave_segments(UpperCAmelCase__ , n_shave_prefix_segments=UpperCAmelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
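# For example, one resnet key passes through the substring substitutions
# above as:
#   {"old": "input_blocks.1.0.in_layers.0.weight",
#    "new": "input_blocks.1.0.norm1.weight"}
# (the block-level prefix is rewritten later by assign_to_checkpoint).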
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
lowercase__ : List[Any] = []
for old_item in old_list:
lowercase__ : Dict = old_item
lowercase__ : str = new_item.replace("norm.weight" , "group_norm.weight" )
lowercase__ : Union[str, Any] = new_item.replace("norm.bias" , "group_norm.bias" )
lowercase__ : Union[str, Any] = new_item.replace("proj_out.weight" , "proj_attn.weight" )
lowercase__ : Optional[Any] = new_item.replace("proj_out.bias" , "proj_attn.bias" )
lowercase__ : Optional[int] = shave_segments(UpperCAmelCase__ , n_shave_prefix_segments=UpperCAmelCase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase__ : str = old_checkpoint[path]
lowercase__ : Optional[Any] = old_tensor.shape[0] // 3
lowercase__ : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase__ : Optional[Any] = old_tensor.shape[0] // config["num_head_channels"] // 3
lowercase__ : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = old_tensor.split(channels // num_heads , dim=1 )
lowercase__ : int = query.reshape(UpperCAmelCase__ )
lowercase__ : Any = key.reshape(UpperCAmelCase__ )
lowercase__ : Optional[int] = value.reshape(UpperCAmelCase__ )
for path in paths:
lowercase__ : int = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase__ : str = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
lowercase__ : Any = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
lowercase__ : List[Any] = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase__ : Optional[Any] = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase__ : Union[str, Any] = old_checkpoint[path["old"]][:, :, 0]
else:
lowercase__ : Any = old_checkpoint[path["old"]]
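# Worked example for the qkv split above (the sizes are hypothetical):
# a fused qkv conv weight of shape (192, 64, 1) with
# config["num_head_channels"] == 32 gives
#   channels  = 192 // 3 = 64
#   num_heads = 192 // 32 // 3 = 2
#   reshape -> (2, 96, 64, 1); split(channels // num_heads = 32, dim=1)
#   -> three (2, 32, 64, 1) chunks for query / key / value, each finally
#   reshaped with target_shape = (-1, 64) into a (64, 64) linear weight.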
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = {}
lowercase__ : Any = checkpoint["time_embed.0.weight"]
lowercase__ : Optional[Any] = checkpoint["time_embed.0.bias"]
lowercase__ : Tuple = checkpoint["time_embed.2.weight"]
lowercase__ : Dict = checkpoint["time_embed.2.bias"]
lowercase__ : Union[str, Any] = checkpoint["input_blocks.0.0.weight"]
lowercase__ : List[Any] = checkpoint["input_blocks.0.0.bias"]
lowercase__ : List[Any] = checkpoint["out.0.weight"]
lowercase__ : str = checkpoint["out.0.bias"]
lowercase__ : Dict = checkpoint["out.2.weight"]
lowercase__ : Dict = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowercase__ : str = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowercase__ : List[Any] = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(UpperCAmelCase__ )
}
# Retrieves the keys for the middle blocks only
lowercase__ : str = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowercase__ : str = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(UpperCAmelCase__ )
}
# Retrieves the keys for the output blocks only
lowercase__ : int = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowercase__ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(UpperCAmelCase__ )
}
for i in range(1 , UpperCAmelCase__ ):
lowercase__ : Union[str, Any] = (i - 1) // (config["num_res_blocks"] + 1)
lowercase__ : int = (i - 1) % (config["num_res_blocks"] + 1)
lowercase__ : int = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
lowercase__ : str = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
lowercase__ : Any = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
lowercase__ : Dict = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
lowercase__ : Optional[Any] = renew_resnet_paths(UpperCAmelCase__ )
lowercase__ : List[Any] = {"old": F"""input_blocks.{i}.0""", "new": F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
lowercase__ : Union[str, Any] = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase__ )
if len(UpperCAmelCase__ ):
lowercase__ : Optional[int] = renew_attention_paths(UpperCAmelCase__ )
lowercase__ : List[str] = {
"old": F"""input_blocks.{i}.1""",
"new": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowercase__ : Dict = {
F"""input_blocks.{i}.1.qkv.bias""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase__ , config=UpperCAmelCase__ , )
lowercase__ : Tuple = middle_blocks[0]
lowercase__ : List[Any] = middle_blocks[1]
lowercase__ : Dict = middle_blocks[2]
lowercase__ : List[Any] = renew_resnet_paths(UpperCAmelCase__ )
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , config=UpperCAmelCase__ )
lowercase__ : Union[str, Any] = renew_resnet_paths(UpperCAmelCase__ )
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , config=UpperCAmelCase__ )
lowercase__ : Optional[Any] = renew_attention_paths(UpperCAmelCase__ )
lowercase__ : List[str] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , attention_paths_to_split=UpperCAmelCase__ , config=UpperCAmelCase__ )
for i in range(UpperCAmelCase__ ):
lowercase__ : str = i // (config["num_res_blocks"] + 1)
lowercase__ : Tuple = i % (config["num_res_blocks"] + 1)
lowercase__ : Union[str, Any] = [shave_segments(UpperCAmelCase__ , 2 ) for name in output_blocks[i]]
lowercase__ : Tuple = {}
for layer in output_block_layers:
lowercase__ , lowercase__ : int = layer.split("." )[0], shave_segments(UpperCAmelCase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase__ )
else:
lowercase__ : str = [layer_name]
if len(UpperCAmelCase__ ) > 1:
lowercase__ : Any = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
lowercase__ : List[Any] = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
lowercase__ : Tuple = renew_resnet_paths(UpperCAmelCase__ )
lowercase__ : Union[str, Any] = renew_resnet_paths(UpperCAmelCase__ )
lowercase__ : str = {"old": F"""output_blocks.{i}.0""", "new": F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , config=UpperCAmelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase__ : Optional[int] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowercase__ : List[str] = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
lowercase__ : str = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase__ ) == 2:
lowercase__ : List[str] = []
if len(UpperCAmelCase__ ):
lowercase__ : Tuple = renew_attention_paths(UpperCAmelCase__ )
lowercase__ : int = {
"old": F"""output_blocks.{i}.1""",
"new": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowercase__ : str = {
F"""output_blocks.{i}.1.qkv.bias""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=UpperCAmelCase__ , )
else:
lowercase__ : Union[str, Any] = renew_resnet_paths(UpperCAmelCase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase__ : Optional[int] = ".".join(["output_blocks", str(UpperCAmelCase__ ), path["old"]] )
lowercase__ : List[str] = ".".join(["up_blocks", str(UpperCAmelCase__ ), "resnets", str(UpperCAmelCase__ ), path["new"]] )
lowercase__ : Optional[Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCAmelCase__ = json.loads(f.read())
lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCAmelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCAmelCase__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
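# Example invocation (the script name and paths are hypothetical):
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm_model/model.pt \
#       --config_file ./ldm_model/config.json \
#       --dump_path ./converted_model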
| 714 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
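# For example, "focalnet-tiny" resolves to depths=[2, 2, 6, 2],
# embed_dim=96, focal_levels=[2, 2, 2, 2] (no "lrf" in the name),
# focal_windows=[3, 3, 3, 3], the ImageNet-1k id2label mapping, and all
# of the conv-embed / post-layernorm / layerscale flags set to False.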
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
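# Tracing the renames above on one hypothetical key:
#   "layers.0.blocks.1.modulation.f.weight"
#   -> "encoder.layers.0.blocks.1.modulation.f.weight"   ("layers" prefix)
#   -> "encoder.stages.0.blocks.1.modulation.f.weight"   (layers -> stages)
#   -> "encoder.stages.0.layers.1.modulation.f.weight"   (blocks -> layers)
#   -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"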
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
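# A worked example of the feature count above: with the defaults
# (input_size=1, num_time_features=0, no static or dynamic real features,
# and cardinality defaulting to [0], hence embedding_dimension == [0]),
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, so feature_size becomes
# input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.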
| 81 | 0 |
def __lowerCamelCase ( lowerCamelCase__ = 3 , lowerCamelCase__ = 7 , lowerCamelCase__ = 1_000_000 ):
"""simple docstring"""
lowercase__ : Dict = 0
lowercase__ : List[str] = 1
for current_denominator in range(1 , limit + 1 ):
lowercase__ : str = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
lowercase__ : List[Any] = current_numerator
lowercase__ : Dict = current_denominator
return max_numerator
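# Worked example: for denominators up to 8 the fraction immediately to the
# left of 3/7 is 2/5, so solution(3, 7, 8) returns 2; with the default
# limit of 1_000_000 the answer to Project Euler 71 is 428570.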
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 716 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 0 |
from typing import List
import numpy as np
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = {key: len(lowerCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n"
+ "\n".join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
lowercase__ : str = max(lists_lengths.values() , default=0 )
return max(1 , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = []
for group_idx in range(lowerCamelCase__ ):
lowercase__ : Tuple = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowercase__ : Tuple = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowercase__ : List[str] = range(lowerCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowerCamelCase__ )
return shards_indices_per_group
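# For example, _distribute_shards(num_shards=5, max_num_jobs=2) gives the
# first group 5 // 2 + 1 = 3 shards and the second group 2, i.e.
# [range(0, 3), range(3, 5)].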
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = _number_of_shards_in_gen_kwargs(lowerCamelCase__ )
if num_shards == 1:
return [dict(lowerCamelCase__ )]
else:
lowercase__ : Union[str, Any] = _distribute_shards(num_shards=lowerCamelCase__ , max_num_jobs=lowerCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowerCamelCase__ ) )
]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowerCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = {len(lowerCamelCase__ ) for value in gen_kwargs.values() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
lowercase__ : Any = {}
for size in list_sizes:
lowercase__ : Union[str, Any] = list(range(lowerCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowercase__ : Optional[Any] = dict(lowerCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Any = [value[i] for i in indices_per_size[len(lowerCamelCase__ )]]
return shuffled_kwargs
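# End-to-end example with hypothetical gen_kwargs: splitting
#   {"files": ["a", "b", "c"], "tag": "train"}
# over max_num_jobs=2 yields
#   [{"files": ["a", "b"], "tag": "train"}, {"files": ["c"], "tag": "train"}],
# and merging the two dicts back recovers the original "files" list.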
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class snake_case__(DiffusionPipeline ):
    """simple docstring"""
    _optional_components = ["melgan"]

    def __init__( self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: T5FilmDecoder , scheduler: DDPMScheduler , melgan: OnnxRuntimeModel if is_onnx_available() else Any , ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        """Linearly scale features to network outputs range."""
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        """Invert by linearly scaling network outputs to features range."""
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
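    # Hedged numeric check (illustrative, not in the original pipeline): with
    # min_value = log(1e-5) ~ -11.51 and max_value = 4.0, scale_features maps a
    # spectrogram into [-1, 1] and scale_to_features inverts the mapping:
    #
    #   feats = torch.tensor([math.log(1e-5), 4.0])
    #   scaled = pipe.scale_features(feats, output_range=[-1.0, 1.0])   # tensor([-1., 1.])
    #   back = pipe.scale_to_features(scaled, input_range=[-1.0, 1.0])  # ~feats again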
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self , input_tokens: List[List[int]] , generator: Optional[torch.Generator] = None , num_inference_steps: int = 100 , return_dict: bool = True , output_type: str = "numpy" , callback: Optional[Callable[[int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info("Generated segment" , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'numpy' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'numpy' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
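# Hedged usage sketch (assumes the public "google/music-spectrogram-diffusion"
# checkpoint and the MidiProcessor helper are available in your environment;
# the MIDI file name is hypothetical):
#
#   from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   processor = MidiProcessor()
#   output = pipe(processor("beethoven_hammerklavier_2.mid"))
#   audio = output.audios[0]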
| 719 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
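        # Worked example (illustrative): with image_size=30 and patch_size=2 there are
        # (30 // 2) ** 2 = 225 patches, and with mask_ratio=0.6 the expected sequence
        # length is ceil(0.4 * (225 + 1)) = 91 visible tokens ([CLS] included).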
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class snake_case__(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case ( self : List[str] ):
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # prepare a noise vector that will also be used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 )
| 81 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class snake_case__(FeatureExtractionMixin ):
"""simple docstring"""
    def __init__( self , **kwargs ):
        requires_backends(self , ["bs4"] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
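    # Hedged example (illustrative): for "<html><body><p>a</p><p>b</p></body></html>",
    # calling xpath_soup on the second <p> yields xpath_tags=["html", "body", "p"] and
    # xpath_subscripts=[0, 0, 2]: only children get subscript 0, the second of two
    # siblings gets subscript 2.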
    def get_three_from_single( self , html_string ):
        html_code = BeautifulSoup(html_string , "html.parser" )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subscripts ):
        xpath = ""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
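    # Hedged example: construct_xpath(["html", "body", "p"], [0, 0, 2]) returns
    # "/html/body/p[2]" -- subscripts of 0 are omitted from the path.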
    def __call__( self , html_strings ):
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , string2xtag_seq , string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
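# Hedged usage sketch (this class is MarkupLMFeatureExtractor in transformers;
# requires `bs4` to be installed):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   print(encoding["nodes"])   # [['Hello world']]
#   print(encoding["xpaths"])  # [['/html/body/p']]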
| 720 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(PretrainedConfig ):
    """simple docstring"""
    model_type = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict( self ):
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict( self ):
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list():
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 81 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    """simple docstring"""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class snake_case__(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , "rb" ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits

    def _cast_table( self , pa_table: pa.Table ):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table

    def _generate_tables( self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , "rb" ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
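# Hedged usage sketch ("parquet" is the builder `load_dataset` dispatches to for
# local parquet files; the file path below is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#   print(ds["train"].features)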
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(PretrainedConfig ):
    """simple docstring"""
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 81 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class snake_case__(MobileViTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class snake_case__(BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]

    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_pad: bool = True , pad_size: int = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
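    # Worked example (illustrative): with size=8, a 518x532 image gets
    # pad_height = (518 // 8 + 1) * 8 - 518 = 2 and pad_width = (532 // 8 + 1) * 8 - 532 = 4,
    # so both output sides become multiples of 8 (an exact multiple is padded by a
    # full extra block of `size` pixels).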
    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 81 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class snake_case__(PretrainedConfig ):
    """simple docstring"""
    model_type = "van"

    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 701 |
import argparse
import json
from tqdm import tqdm
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )

if __name__ == "__main__":
    main()
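# Hedged CLI sketch (the script file name and output paths are hypothetical;
# --src_path defaults to biencoder-nq-dev.json as defined above):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set output/biencoder-nq-dev.questions \
#       --gold_data_path output/biencoder-nq-dev.pages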
| 81 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ):
    """simple docstring"""
    ar = f"""{sampling_rate}"""
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile" )
    return audio
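# Hedged usage sketch (requires the `ffmpeg` binary on PATH; the file name is
# hypothetical):
#
#   with open("sample.mp3", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   print(audio.dtype, audio.shape)  # float32, (num_samples,)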
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = "f32le" , ):
"""simple docstring"""
lowercase__ : List[str] = F"""{sampling_rate}"""
lowercase__ : Union[str, Any] = "1"
if format_for_conversion == "s16le":
lowercase__ : Dict = 2
elif format_for_conversion == "f32le":
lowercase__ : Optional[int] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowercase__ : Optional[int] = platform.system()
if system == "Linux":
lowercase__ : Optional[int] = "alsa"
lowercase__ : List[str] = "default"
elif system == "Darwin":
lowercase__ : Union[str, Any] = "avfoundation"
lowercase__ : int = ":0"
elif system == "Windows":
lowercase__ : Optional[Any] = "dshow"
lowercase__ : List[Any] = "default"
lowercase__ : List[str] = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
lowercase__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ : Optional[int] = _ffmpeg_stream(lowerCamelCase__ , lowerCamelCase__ )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s=None , stride_length_s=None , format_for_conversion="f32le" , ):
    """simple docstring"""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream=False ):
    """simple docstring"""
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
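# Worked example (illustrative): with chunk_len=6, stride=(2, 2), stream=False and
# an iterator yielding b"abcdefgh", the generator flushes b"abcdef" with stride
# (0, 2), keeps acc = b"cdefgh" (it rewinds stride_left + stride_right bytes) and
# flushes it with stride (2, 2), then yields the b"efgh" tail with stride (2, 0).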
def _ffmpeg_stream( ffmpeg_command , buflen ):
    """simple docstring"""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 702 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    """simple docstring"""
    def fn(examples):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args):
    """simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
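    # Worked example of the drop-remainder arithmetic in group_texts (illustrative
    # numbers): with total_length = 1030 and args.max_length = 512 we keep
    # (1030 // 512) * 512 = 1024 tokens, i.e. two 512-token samples; the last 6 tokens
    # are dropped.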
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
    args = parse_args()
    main(args)
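# A minimal sketch (not part of the original script) of consuming the shards written
# above; the feature spec mirrors the serialization in get_serialized_examples and the
# glob pattern assumes the default --output_dir/--split values.
#
#   feature_spec = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord"))
#   ds = ds.map(lambda rec: tf.io.parse_single_example(rec, feature_spec))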
| 81 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = '''UperNetConfig'''
class UperNetConvModule(nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int], str] = 0 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
        lowercase__ : str = nn.Conv2d(
            in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE , )
        lowercase__ : int = nn.BatchNorm2d(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = nn.ReLU()
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : str = self.conv(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.batch_norm(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.activation(SCREAMING_SNAKE_CASE )
return output
class UperNetPyramidPoolingBlock(nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
super().__init__()
lowercase__ : List[str] = [
            nn.AdaptiveAvgPool2d(SCREAMING_SNAKE_CASE ),
UperNetConvModule(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : List[Any] = input
for layer in self.layers:
lowercase__ : Optional[int] = layer(SCREAMING_SNAKE_CASE )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple[int, ...] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool ):
super().__init__()
lowercase__ : Dict = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Tuple = in_channels
lowercase__ : Union[str, Any] = channels
lowercase__ : str = []
for i, pool_scale in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , channels=SCREAMING_SNAKE_CASE )
self.blocks.append(SCREAMING_SNAKE_CASE )
self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : List[Any] = []
for ppm in self.blocks:
lowercase__ : Optional[Any] = ppm(SCREAMING_SNAKE_CASE )
lowercase__ : Any = nn.functional.interpolate(
SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(SCREAMING_SNAKE_CASE )
return ppm_outs
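# Shape sketch of one pyramid-pooling branch above (illustrative sizes, not part of the
# original module): pool the feature map down to S x S, project channels with a 1x1
# conv, then upsample back to the input resolution.
#
#   x      = torch.randn(1, 768, 16, 16)                   # backbone feature map
#   pooled = nn.AdaptiveAvgPool2d(2)(x)                    # -> (1, 768, 2, 2)
#   proj   = nn.Conv2d(768, 512, kernel_size=1)(pooled)    # -> (1, 512, 2, 2)
#   up     = nn.functional.interpolate(proj, size=x.shape[2:], mode="bilinear")
#   # -> (1, 512, 16, 16); one such tensor is produced per pool scale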
class UperNetHead(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ):
super().__init__()
lowercase__ : Tuple = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : int = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Dict = False
        lowercase__ : List[Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase__ : str = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase__ : Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase__ : Tuple = nn.ModuleList()
lowercase__ : Tuple = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : Any = UperNetConvModule(SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
lowercase__ : List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(SCREAMING_SNAKE_CASE )
self.fpn_convs.append(SCREAMING_SNAKE_CASE )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def snake_case ( self : Optional[Any] ):
self.apply(self._init_weights )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
        if isinstance(SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Optional[Any] = inputs[-1]
lowercase__ : Union[str, Any] = [x]
psp_outs.extend(self.psp_modules(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=1 )
lowercase__ : Union[str, Any] = self.bottleneck(SCREAMING_SNAKE_CASE )
return output
def snake_case ( self : str , SCREAMING_SNAKE_CASE : torch.Tensor ):
# build laterals
lowercase__ : Optional[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(SCREAMING_SNAKE_CASE ) )
# build top-down path
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : Optional[Any] = laterals[i - 1].shape[2:]
lowercase__ : Tuple = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=SCREAMING_SNAKE_CASE , mode="bilinear" , align_corners=self.align_corners )
# build outputs
lowercase__ : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
lowercase__ : List[str] = torch.cat(SCREAMING_SNAKE_CASE , dim=1 )
lowercase__ : Optional[Any] = self.fpn_bottleneck(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE )
return output
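# Minimal sketch of the top-down FPN fusion in the forward above (standalone,
# illustrative sizes): each lateral is summed with the upsampled coarser level.
#
#   laterals = [torch.randn(1, 512, s, s) for s in (32, 16, 8)]
#   for i in range(len(laterals) - 1, 0, -1):
#       laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
#           laterals[i], size=laterals[i - 1].shape[2:], mode="bilinear")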
class UperNetFCNHead(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
lowercase__ : int = config
lowercase__ : Union[str, Any] = config.auxiliary_in_channels
lowercase__ : Dict = config.auxiliary_channels
lowercase__ : Tuple = config.auxiliary_num_convs
lowercase__ : Union[str, Any] = config.auxiliary_concat_input
lowercase__ : List[str] = in_index
lowercase__ : Union[str, Any] = (kernel_size // 2) * dilation
lowercase__ : str = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
lowercase__ : str = nn.Identity()
else:
lowercase__ : Union[str, Any] = nn.Sequential(*SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase__ : List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
        lowercase__ : List[Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def snake_case ( self : Optional[Any] ):
self.apply(self._init_weights )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ):
        if isinstance(SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : torch.Tensor ):
# just take the relevant feature maps
lowercase__ : Union[str, Any] = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase__ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase__ : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE )
return output
class UperNetPreTrainedModel(PreTrainedModel ):
"""simple docstring"""
lowercase_ = UperNetConfig
lowercase_ = """pixel_values"""
lowercase_ = True
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case ( self : Optional[int] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=False ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Any = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation(UperNetPreTrainedModel ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : List[str] = UperNetHead(SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
lowercase__ : List[str] = UperNetFCNHead(SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , ):
lowercase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : str = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Any = self.backbone.forward_with_filtered_kwargs(
SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = outputs.feature_maps
lowercase__ : Optional[int] = self.decode_head(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = nn.functional.interpolate(SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=SCREAMING_SNAKE_CASE )
lowercase__ : str = None
if self.auxiliary_head is not None:
lowercase__ : Optional[int] = self.auxiliary_head(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = nn.functional.interpolate(
SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=SCREAMING_SNAKE_CASE )
lowercase__ : int = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
lowercase__ : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : str = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : List[Any] = (logits,) + outputs[1:]
else:
lowercase__ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
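# End-to-end usage sketch (assumes the classes above are exported from transformers as
# in the released library; the checkpoint name comes from the archive list at the top):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   logits = outputs.logits  # (batch_size, num_labels, height, width)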
| 703 |
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
        return ConvNextV2Config(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
        lowercase__ : Dict = ConvNextV2Model(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
        lowercase__ : Any = ConvNextV2ForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
        lowercase__ : Any = ConvNextV2Backbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
        lowercase__ : List[Any] = ConvNextV2Backbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
        lowercase__ : List[str] = ConvNextV2ModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ : List[str] = ConvNextV2Model.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
        lowercase__ : Union[str, Any] = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
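# Standalone inference sketch mirroring the integration test above (same public
# checkpoint; `image` is any PIL image you supply):
#
#   from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   predicted = model.config.id2label[logits.argmax(-1).item()]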
| 81 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
        lowercase__ : Optional[Any] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2SeqTrainerTester(TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
        lowercase__ : int = Seq2SeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
        lowercase__ : str = Seq2SeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
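        # Note on the masking above: label tokens equal to pad_token_id are replaced
        # with -100 so that the default CrossEntropyLoss (ignore_index=-100) skips
        # padding positions when computing the seq2seq loss.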
| 81 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T] ):
    """simple docstring"""
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T] ):
        any_type: Any | T = None
        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p: int , v: T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l: int , r: int ):  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        """simple docstring"""
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
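    # Design note: this is the iterative ("bottom-up") segment tree layout -- leaves sit
    # at indices N..2N-1 and node p's children are 2p and 2p+1, so build runs in O(n)
    # while update and query are O(log n), all without recursion.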
| 705 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v(state_dict , config , base_model = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]
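# Shape sketch of the fused-projection split above (illustrative, standalone): a fused
# qkv weight of hidden size H is stored as (3H, H); the three H-row slices become the
# separate query/key/value weights.
#
#   H = 192  # e.g. yolos_ti
#   q = in_proj_weight[:H, :]; k = in_proj_weight[H : 2 * H, :]; v = in_proj_weight[-H:, :]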
def rename_key(name ):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def convert_state_dict(orig_state_dict , model ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Dict = key.split("." )
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : int = val[
dim : dim * 2, :
]
lowercase__ : str = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : Optional[Any] = val[-dim:]
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def prepare_img():
"""simple docstring"""
lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
"""simple docstring"""
lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ : int = None, None
if yolos_name == "yolos_ti":
lowercase__ : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase__ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase__ : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase__ : int = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase__ : List[str] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase__ : List[str] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowercase__ : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Tuple=10 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=32 * 4 , SCREAMING_SNAKE_CASE : Any=32 * 6 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Any=32 , ):
lowercase__ : Optional[Any] = parent
lowercase__ : List[str] = batch_size
lowercase__ : str = is_training
lowercase__ : Dict = use_auxiliary_loss
lowercase__ : List[str] = num_queries
lowercase__ : int = num_channels
lowercase__ : List[str] = min_size
lowercase__ : Any = max_size
lowercase__ : Union[str, Any] = num_labels
lowercase__ : List[Any] = mask_feature_size
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE ) > 0.5
).float()
lowercase__ : str = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE ) > 0.5).long()
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case ( self : List[Any] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def snake_case ( self : Tuple ):
lowercase__ : List[Any] = self.prepare_config_and_inputs()
lowercase__ : Optional[int] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : int = output.encoder_hidden_states
lowercase__ : List[Any] = output.pixel_decoder_hidden_states
lowercase__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any]=False ):
with torch.no_grad():
lowercase__ : int = MaskFormerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
        # the correct shape of output.transformer_decoder_last_hidden_state ensures the
        # correctness of the encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = MaskFormerForInstanceSegmentation(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE : List[str] ):
            # check that all the required outputs are present
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__ : str = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
comm_check_on_output(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(
pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE )
comm_check_on_output(SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Tuple ):
lowercase__ : Dict = MaskFormerModelTester(self )
lowercase__ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def snake_case ( self : Optional[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def snake_case ( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : List[str] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Any ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase__ : str = MaskFormerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = (self.model_tester.min_size,) * 2
lowercase__ : Optional[int] = {
"pixel_values": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE ),
"mask_labels": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE ),
"class_labels": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE ).long(),
}
lowercase__ : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = model(**SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def snake_case ( self : Dict ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def snake_case ( self : str ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ : Any = self.all_model_classes[1]
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Dict ):
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ : Tuple = self.all_model_classes[1]
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowercase__ : List[Any] = True
lowercase__ : Tuple = True
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE )
lowercase__ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowercase__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ : str = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1e-4
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : int ):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def snake_case ( self : Optional[int] ):
lowercase__ : Optional[int] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ : Dict = model(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(SCREAMING_SNAKE_CASE )
.eval()
)
lowercase__ : str = self.default_image_processor
lowercase__ : Dict = prepare_img()
lowercase__ : List[str] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowercase__ : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ : Optional[int] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
lowercase__ : Tuple = torch.tensor(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowercase__ : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase__ : Tuple = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def snake_case ( self : int ):
lowercase__ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(SCREAMING_SNAKE_CASE )
.eval()
)
lowercase__ : int = self.default_image_processor
lowercase__ : Optional[Any] = prepare_img()
lowercase__ : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 800, 1_088) )
with torch.no_grad():
lowercase__ : Any = model(**SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowercase__ : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowercase__ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowercase__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase__ : Dict = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(SCREAMING_SNAKE_CASE )
.eval()
)
lowercase__ : Dict = self.default_image_processor
lowercase__ : List[str] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowercase__ : Tuple = inputs["pixel_values"].to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = [el.to(SCREAMING_SNAKE_CASE ) for el in inputs["mask_labels"]]
lowercase__ : Optional[Any] = [el.to(SCREAMING_SNAKE_CASE ) for el in inputs["class_labels"]]
with torch.no_grad():
lowercase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
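# A minimal end-to-end inference sketch for the classes exercised above; it
# assumes the "facebook/maskformer-swin-small-coco" checkpoint and the COCO
# sample image URL are reachable.
import requests
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# fuse per-query mask logits and class logits into one per-pixel semantic map
semantic_map = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]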
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
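# A simplified sketch of the lazy-import pattern the init above relies on:
# a PEP 562 module-level __getattr__ defers the heavy import until a name is
# first accessed. This is an illustrative stand-in (with a hypothetical
# `_lazy_exports` mapping), not the actual transformers._LazyModule.
import importlib

_lazy_exports = {"json": ["dumps", "loads"]}  # hypothetical module -> names mapping

def __getattr__(name):
    for module_name, exported_names in _lazy_exports.items():
        if name in exported_names:
            module = importlib.import_module(module_name)  # imported lazily, on first use
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")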
| 81 | 0 |
def decimal_isolate(number, digit_amount):
    """Isolate the fractional part of ``number``, rounded to ``digit_amount`` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
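# For reference, math.modf from the standard library splits a float the same
# way (a library-based sketch of the operation above):
import math

fractional, whole = math.modf(35.345)  # fractional ≈ 0.345 (sign follows input), whole == 35.0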
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
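# A minimal usage sketch for CLIPProcessor outside the test harness; assumes
# the "openai/clip-vit-base-patch32" checkpoint and the image URL are reachable.
import requests
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").eval()
inputs = processor(text=["two cats", "a dog"], images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    logits_per_image = model(**inputs).logits_per_image
probs = logits_per_image.softmax(dim=1)  # image-text similarity as probabilities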
| 81 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
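# A minimal usage sketch for TextIteratorStreamer outside the test harness;
# it mirrors the threaded pattern above with a tiny hub checkpoint (assumed reachable).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()
for new_text in streamer:  # yields decoded text chunks as they are generated
    print(new_text, end="")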
| 81 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""input_features"""]
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int=80 , SCREAMING_SNAKE_CASE : Union[str, Any]=16_000 , SCREAMING_SNAKE_CASE : Optional[int]=160 , SCREAMING_SNAKE_CASE : Optional[Any]=30 , SCREAMING_SNAKE_CASE : Dict=400 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE : int=False , **SCREAMING_SNAKE_CASE : Tuple , ):
super().__init__(
feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
lowercase__ : List[str] = n_fft
lowercase__ : Any = hop_length
lowercase__ : Dict = chunk_length
lowercase__ : Union[str, Any] = chunk_length * sampling_rate
lowercase__ : Any = self.n_samples // hop_length
lowercase__ : List[Any] = sampling_rate
lowercase__ : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=SCREAMING_SNAKE_CASE , norm="slaney" , mel_scale="slaney" , )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.array ):
lowercase__ : Tuple = spectrogram(
SCREAMING_SNAKE_CASE , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
lowercase__ : Tuple = log_spec[:, :-1]
lowercase__ : Any = np.maximum(SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
lowercase__ : Optional[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case ( SCREAMING_SNAKE_CASE : List[np.ndarray] , SCREAMING_SNAKE_CASE : List[np.ndarray] , SCREAMING_SNAKE_CASE : float = 0.0 ):
if attention_mask is not None:
lowercase__ : List[str] = np.array(SCREAMING_SNAKE_CASE , np.intaa )
lowercase__ : List[str] = []
for vector, length in zip(SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
lowercase__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase__ : List[Any] = padding_value
normed_input_values.append(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : str , SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[str] = "max_length" , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , **SCREAMING_SNAKE_CASE : List[Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ : str = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ : Any = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
lowercase__ : Tuple = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ : Optional[Any] = [np.asarray([raw_speech] ).T]
lowercase__ : List[str] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowercase__ : List[Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase__ : Dict = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowercase__ : Dict = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowercase__ : Tuple = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowercase__ : str = [self._np_extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
lowercase__ : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase__ : Optional[int] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowercase__ : str = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
def snake_case ( self : Dict ):
lowercase__ : Dict = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
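# A minimal sketch of the log-mel pipeline implemented above, run on a random
# waveform and reusing the same audio_utils helpers with the same arguments.
import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate, n_fft, hop_length = 16_000, 400, 160
waveform = np.random.default_rng(0).standard_normal(sampling_rate).astype(np.float32)
filters = mel_filter_bank(
    num_frequency_bins=1 + n_fft // 2,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8_000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)
log_spec = spectrogram(
    waveform,
    window_function(n_fft, "hann"),
    frame_length=n_fft,
    hop_length=hop_length,
    power=2.0,
    mel_filters=filters,
    log_mel="log10",
)[:, :-1]
# clamp the dynamic range and rescale, exactly as _np_extract_fbank_features does
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0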
| 709 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
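# A minimal sketch of the reparameterization trick used by the diagonal
# Gaussian posterior above: sampling stays differentiable w.r.t. mean and logvar.
import torch

parameters = torch.randn(2, 8, 4, 4, requires_grad=True)  # concat of [mean, logvar]
mean, logvar = torch.chunk(parameters, 2, dim=1)
std = torch.exp(0.5 * torch.clamp(logvar, -30.0, 20.0))
sample = mean + std * torch.randn_like(mean)  # eps ~ N(0, I); no gradient flows through eps
sample.sum().backward()                       # gradients reach `parameters`
assert parameters.grad is not None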
| 81 | 0 |
from __future__ import annotations
def pigeon_sort(array):
    """Sort ``array`` in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the hole range and allocate the holes.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Fill the holes.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Write the sorted values back into the array.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 710 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
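# --- Editorial usage sketch, not part of the test suite: the slow tests above
# drive DiTPipeline end to end. A minimal standalone version, assuming only the
# public diffusers API and the "facebook/DiT-XL-2-256" checkpoint they use:
#
# import torch
# from diffusers import DiTPipeline
#
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# pipe.to("cuda")
# class_ids = pipe.get_label_ids(["white shark"])   # ImageNet names -> class ids
# generator = torch.manual_seed(0)
# images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images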
| 81 | 0 |
lowerCAmelCase__ = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowerCAmelCase__ = ['''a''', '''b''', '''c''', '''d''', '''e''']
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = start
# add current to visited
visited.append(lowerCamelCase__ )
lowercase__ : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowercase__ : int = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# if all neighbors visited add current to sort
sort.append(lowerCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
lowercase__ : Tuple = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCAmelCase__ = topological_sort('''a''', [], [])
print(sort)
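# --- Editorial reconstruction, not from the source: the machine-mangled names
# above obscure the recursion. Assuming the three positional parameters are
# (start, visited, sort) and the two globals are the `edges` adjacency map and
# the `vertices` list defined at the top, the same DFS-based sort reads:
#
# def topological_sort(start, visited, sort):
#     visited.append(start)
#     for neighbor in edges[start]:
#         if neighbor not in visited:
#             sort = topological_sort(neighbor, visited, sort)
#     sort.append(start)  # post-order: every descendant is already placed
#     if len(visited) != len(vertices):  # restart from any unvisited vertex
#         for vertex in vertices:
#             if vertex not in visited:
#                 sort = topological_sort(vertex, visited, sort)
#     return sort
#
# For the graph above this returns ['c', 'd', 'e', 'b', 'a']; reading it right
# to left gives a valid topological order.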
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
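# --- Editorial sketch, not from the source: the last three tests pin down the
# `set_timesteps` contract of this scheduler. Assuming the public diffusers
# API exercised above (num_train_timesteps=201 in the test config):
#
# from diffusers import CMStochasticIterativeScheduler
#
# scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201)
# scheduler.set_timesteps(timesteps=[106, 0])              # ok: strictly descending
# scheduler.set_timesteps(timesteps=[39, 30, 12, 15, 0])   # ValueError: not descending
# scheduler.set_timesteps(num_inference_steps=5,
#                         timesteps=[39, 30, 12, 1, 0])    # ValueError: pass only one
# scheduler.set_timesteps(timesteps=[201])                 # ValueError: must start below
#                                                          # num_train_timesteps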
| 81 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case ( self : int ):
lowercase__ : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
lowercase__ : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] )
lowercase__ : int = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
lowercase__ : List[Any] = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
lowercase__ : List[Any] = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
lowercase__ : int = text_classifier("This is great !" , return_all_scores=SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] )
lowercase__ : Optional[int] = text_classifier("This is great !" , return_all_scores=SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
lowercase__ : Tuple = text_classifier(["This is great !", "Something else"] , return_all_scores=SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
lowercase__ : Optional[int] = text_classifier(["This is great !", "Something else"] , return_all_scores=SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def snake_case ( self : List[Any] ):
import torch
lowercase__ : List[Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
lowercase__ : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def snake_case ( self : List[str] ):
lowercase__ : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
lowercase__ : Union[str, Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def snake_case ( self : Tuple ):
lowercase__ : Optional[int] = pipeline("text-classification" )
lowercase__ : int = text_classifier("This is great !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] )
lowercase__ : Any = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowercase__ : str = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[int] = pipeline("text-classification" , framework="tf" )
lowercase__ : List[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] )
lowercase__ : Union[str, Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] )
lowercase__ : Optional[Any] = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.988}] )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Union[str, Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase__ : str = "HuggingFace is in"
lowercase__ : str = text_classifier(SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
lowercase__ : Dict = ["HuggingFace is in ", "Paris is in France"]
lowercase__ : Tuple = text_classifier(SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}, {"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase__ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [[{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] * N, [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] * N] , )
lowercase__ : Union[str, Any] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
lowercase__ : Dict = text_classifier(SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowercase__ : Optional[int] = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(SCREAMING_SNAKE_CASE ):
text_classifier(SCREAMING_SNAKE_CASE )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase__ : Tuple = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , [{"label": ANY(SCREAMING_SNAKE_CASE ), "score": ANY(SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
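# --- Editorial usage sketch, not from the source: the behaviors asserted above
# reduce to a small public API. With the same tiny checkpoint the tests use:
#
# from transformers import pipeline
#
# clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
# clf("This is great !")            # -> [{"label": "LABEL_0", "score": ...}]
# clf("This is great !", top_k=2)   # -> both labels, sorted by score
# clf({"text": "HuggingFace is in ", "text_pair": "Paris is in France"})
#                                   # -> a single dict for text-pair input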
| 712 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
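# --- Editorial note, not from the source: steps 2-5 of `step` implement the
# DDPM posterior q(x_{t-1} | x_t, x_0) of Ho et al. 2020, eqs. (6)-(7). In the
# paper's notation the quantities computed above are
#
#   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                  + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
#
#   beta_tilde_t   = ((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * beta_t
#
# matching `pred_original_sample_coeff`, `current_sample_coeff`, and the
# "fixed_small" branch of `_get_variance`, respectively.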
| 81 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase__ = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase__ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase__ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase__ = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
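# --- Editorial usage sketch, not from the source: the dump written above is a
# plain pickled list indexed by token id, so consumers can read it directly.
# Assuming the default paths:
#
# import pickle
#
# with open("data/token_counts.bert-base-uncased.pickle", "rb") as fp:
#     counts = pickle.load(fp)
# assert len(counts) == 30522   # one entry per vocabulary id
# # counts[i] = occurrences of token id i; these frequencies smooth the
# # MLM masking probabilities during distillation.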
| 713 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
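# --- Editorial sketch, not from the source: this community pipeline first runs
# CLIPSeg on (image, text) to build a mask, then defers to
# StableDiffusionInpaintPipeline. The mask step in isolation, under the same
# assumptions as `__call__` (the CIDAS/clipseg-rd64-refined checkpoint is an
# editorial assumption, any CLIPSeg checkpoint fits):
#
# import torch
# from PIL import Image
# from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# image = Image.open("input.png").convert("RGB")
# inputs = processor(text=["a glass"], images=[image], padding="max_length", return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# mask = torch.sigmoid(outputs.logits)   # soft mask, resized to the image before inpainting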
| 81 | 0 |
| 714 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
# fmt: off
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
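# --- Editorial usage note, not from the source: this is a standard checkpoint
# converter. A typical invocation (the script file name is an assumption):
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub
#
# It downloads the original weights, renames keys to the transformers layout,
# verifies the logits on the COCO test image, then saves or pushes the model
# together with its BitImageProcessor.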
| 81 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
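# --- Editorial note, not from the source: this is the usual transformers
# deprecation shim; the old class warns and otherwise behaves like the new one.
# New code should construct the replacement directly:
#
# from transformers import PoolFormerImageProcessor
#
# processor = PoolFormerImageProcessor()   # instead of PoolFormerFeatureExtractor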
| 715 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
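# --- Editorial usage sketch, not from the source: assuming this corresponds to
# transformers' InformerConfig (model_type "informer"), a small forecasting
# configuration looks like:
#
# from transformers import InformerConfig
#
# config = InformerConfig(
#     prediction_length=24,     # forecast horizon
#     context_length=48,        # conditioning window, defaults to prediction_length
#     num_time_features=2,
#     attention_type="prob",    # Informer's ProbSparse attention (the default)
#     sampling_factor=5,
# )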
| 81 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
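# A minimal usage sketch of what these tests exercise (hypothetical snippet,
# assumes the public "openai/clip-vit-base-patch32" checkpoint and a PIL image
# `image`; not part of the test suite):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   sorted(inputs.keys())  # -> ["attention_mask", "input_ids", "pixel_values"]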
| 716 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
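# A minimal, self-contained sketch (not used by the converter above) of how a
# fused attention projection of shape (3 * embed_dim, embed_dim) is sliced into
# separate query/key/value weights, mirroring the in_proj_weight handling in
# the loop above:
def _split_fused_projection(in_proj_weight):
    embed_dim = in_proj_weight.shape[0] // 3
    query_weight = in_proj_weight[:embed_dim, :]
    key_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value_weight = in_proj_weight[2 * embed_dim :, :]
    return query_weight, key_weight, value_weight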
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 0 |
import requests
lowerCAmelCase__ = '''YOUR API KEY'''
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = giphy_api_key ):
"""simple docstring"""
lowercase__ : Union[str, Any] = "+".join(query.split() )
lowercase__ : List[str] = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowercase__ : int = requests.get(lowerCamelCase__ ).json()["data"]
return [gif["url"] for gif in gifs]
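# e.g. get_gifs("space ship") requests .../search?q=space+ship&api_key=<key>
# and returns the "url" field of every GIF in the response; a valid Giphy API
# key must replace the placeholder above.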
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
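        # note: "\u0120" (Ġ) is the byte-level BPE marker GPT-2 uses for a
        # leading space in front of a token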
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # It's very difficult to mix/test pretokenization with byte-level BPE
        # and get both GPT2 and Roberta to work at the same time (mostly an
        # issue of adding a space before the string).
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
lowerCAmelCase__ = {
'''vinai/phobert-base''': 2_5_6,
'''vinai/phobert-large''': 2_5_6,
}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : int = set()
lowercase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : str = char
lowercase__ : int = set(lowerCamelCase__ )
return pairs
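# For illustration, the helper above maps a tuple of symbols to the set of its
# adjacent pairs, e.g. ("l", "o", "w") -> {("l", "o"), ("o", "w")}.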
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int]="<s>" , SCREAMING_SNAKE_CASE : List[str]="</s>" , SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE : Any="<s>" , SCREAMING_SNAKE_CASE : Dict="<unk>" , SCREAMING_SNAKE_CASE : Dict="<pad>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<mask>" , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
lowercase__ : Any = vocab_file
lowercase__ : Tuple = merges_file
lowercase__ : Dict = {}
lowercase__ : Any = 0
lowercase__ : int = 1
lowercase__ : Union[str, Any] = 2
lowercase__ : Union[str, Any] = 3
self.add_from_file(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
lowercase__ : Dict = merges_handle.read().split("\n" )[:-1]
lowercase__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
lowercase__ : Any = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Union[str, Any] = {}
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : Dict = [self.cls_token_id]
lowercase__ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
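        # i.e. single sequence: <s> A </s>; pair of sequences: <s> A </s></s> B </s>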
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
lowercase__ : Any = [self.sep_token_id]
lowercase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : Optional[int] ):
return len(self.encoder )
def snake_case ( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
if token in self.cache:
return self.cache[token]
lowercase__ : Tuple = tuple(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowercase__ : Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
lowercase__ : Union[str, Any] = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ : List[str] = bigram
lowercase__ : Dict = []
lowercase__ : Optional[Any] = 0
while i < len(SCREAMING_SNAKE_CASE ):
try:
lowercase__ : Any = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Any = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : int = tuple(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = new_word
if len(SCREAMING_SNAKE_CASE ) == 1:
break
else:
lowercase__ : Any = get_pairs(SCREAMING_SNAKE_CASE )
lowercase__ : str = "@@ ".join(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = word[:-4]
lowercase__ : Dict = word
return word
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : List[str] = []
lowercase__ : Any = re.findall(r"\S+\n?" , SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(" " ) ) )
return split_tokens
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[Any] ):
return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Any = " ".join(SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
return out_string
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Dict = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Any = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
with open(SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowercase__ : Any = f.readlines()
for lineTmp in lines:
lowercase__ : Tuple = lineTmp.strip()
lowercase__ : str = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
lowercase__ : str = line[:idx]
lowercase__ : str = len(self.encoder )
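# A minimal, self-contained sketch of the greedy BPE merge loop implemented in
# `bpe` above (simplified: no cache, no "</w>" suffix handling and no "@@ "
# joining; `bpe_ranks` is a hypothetical stand-in for self.bpe_ranks):
def _tiny_bpe(word, bpe_ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # always apply the lowest-ranked (earliest-learned) merge first
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return word
# e.g. _tiny_bpe("low", {("l", "o"): 0, ("lo", "w"): 1}) == ("low",)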
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
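# For context, `_LazyModule` above implements deferred imports: the submodules
# listed in `_import_structure` are only imported when one of their attributes
# is first accessed, which keeps importing the package cheap. A rough sketch of
# the idea (not the actual transformers implementation):
#
#   class LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._name_to_module[name]
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)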
| 81 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=13 , SCREAMING_SNAKE_CASE : Dict=[30, 30] , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Any=32 , SCREAMING_SNAKE_CASE : Union[str, Any]=5 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Any=37 , SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : str=10 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : List[Any]=8 , SCREAMING_SNAKE_CASE : Optional[Any]=10 , ):
lowercase__ : Tuple = parent
lowercase__ : Any = batch_size
lowercase__ : Any = image_size
lowercase__ : str = patch_size
lowercase__ : int = num_channels
lowercase__ : Dict = is_training
lowercase__ : Dict = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Optional[Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = num_labels
lowercase__ : Optional[Any] = scope
lowercase__ : List[Any] = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Any = num_patches + 1 + self.num_detection_tokens
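        # e.g. with the defaults above (image_size=[30, 30], patch_size=2,
        # num_detection_tokens=10): num_patches = 15 * 15 = 225, so the
        # expected sequence length is 225 + 1 + 10 = 236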
def snake_case ( self : Optional[Any] ):
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : Union[str, Any] = []
for i in range(self.batch_size ):
lowercase__ : Dict = {}
lowercase__ : List[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = torch.rand(self.n_targets , 4 , device=SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = YolosModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[str] = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(pixel_values=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : Dict ):
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ : List[str] = config_and_inputs
lowercase__ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=False ):
lowercase__ : int = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[int] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : int = {}
lowercase__ : Union[str, Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=SCREAMING_SNAKE_CASE , dtype=torch.long )
lowercase__ : List[str] = torch.ones(
self.model_tester.n_targets , 4 , device=SCREAMING_SNAKE_CASE , dtype=torch.float )
labels.append(SCREAMING_SNAKE_CASE )
lowercase__ : str = labels
return inputs_dict
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = YolosModelTester(self )
lowercase__ : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : List[str] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : str ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
# in YOLOS, the seq_len is different
lowercase__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[str] = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Optional[Any] = True
lowercase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : Dict ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : str = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# YOLOS has a different seq_length
lowercase__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : int ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Any ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = YolosModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(inputs.pixel_values )
# verify outputs
lowercase__ : Dict = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=SCREAMING_SNAKE_CASE , )
lowercase__ : Union[str, Any] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify postprocessing
lowercase__ : Any = image_processor.post_process_object_detection(
SCREAMING_SNAKE_CASE , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = [75, 75, 17, 63, 17]
lowercase__ : Dict = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(SCREAMING_SNAKE_CASE )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(results["boxes"][0, :] , SCREAMING_SNAKE_CASE ) )
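# Note on the post-processing step above: `post_process_object_detection`
# turns the raw logits and normalized box predictions into one dict per image
# with "scores", "labels" and "boxes" (in absolute pixel coordinates given
# `target_sizes`), keeping only detections above the confidence threshold.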
| 719 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
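        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91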
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
out_2 = outputs.last_hidden_state.numpy()
out_2[np.isnan(out_2)] = 0
else:
out_2 = outputs.logits.numpy()
out_2[np.isnan(out_2)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname , saved_model=False )
model = model_class.from_pretrained(tmpdirname )
after_outputs = model(model_inputs , noise=noise )
if model_class.__name__ == "TFViTMAEModel":
out_1 = after_outputs["last_hidden_state"].numpy()
out_1[np.isnan(out_1)] = 0
else:
out_1 = after_outputs["logits"].numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(max_diff , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 0 |
class DisjointSet:
    """Disjoint-set (union-find) over pre-sized sets, using union by rank and
    path compression, while tracking the largest set size seen so far."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of disj_set, compressing the path on the way up."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
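
# A minimal usage sketch (not part of the original module): four singleton sets,
# two merges, and the running maximum set size tracked by `max_set`.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1, 1])
    ds.merge(0, 1)  # {0, 1} forms a set of size 2
    ds.merge(1, 2)  # {0, 1, 2} forms a set of size 3
    print(ds.max_set)  # 3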
| 720 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def get_default_vocab_list( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
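# Usage sketch (illustrative, relying on the class names restored above): a
# folding model built without an explicit esmfold_config falls back to the
# defaults and the ESM-2 vocabulary.
# config = EsmConfig(vocab_size=33, is_folding_model=True)
# assert isinstance(config.esmfold_config, EsmFoldConfig)
# assert config.vocab_list == get_default_vocab_list()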
| 81 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=7 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Optional[Any]=400 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : List[Any]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Union[str, Any]=1 / 255 , SCREAMING_SNAKE_CASE : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__ : Union[str, Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Optional[int] = num_channels
lowercase__ : Tuple = min_resolution
lowercase__ : Dict = max_resolution
lowercase__ : Union[str, Any] = do_resize
lowercase__ : Dict = size
lowercase__ : Any = do_normalize
lowercase__ : str = image_mean
lowercase__ : Dict = image_std
lowercase__ : Optional[Any] = do_rescale
lowercase__ : Union[str, Any] = rescale_factor
lowercase__ : List[Any] = do_pad
def snake_case ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int]=False ):
if not batched:
lowercase__ : Optional[int] = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
lowercase__ : Union[str, Any] = image.size
else:
lowercase__ : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
lowercase__ : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
lowercase__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
lowercase__ : Tuple = self.size["shortest_edge"]
lowercase__ : Dict = int(self.size["shortest_edge"] * w / h )
else:
lowercase__ : List[str] = self.size["shortest_edge"]
lowercase__ : List[str] = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
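# Worked example of the shortest-edge rule above (hypothetical numbers): a
# 640x480 PIL image (w=640, h=480) with size {"shortest_edge": 18} takes the
# w > h branch, so expected_height = 18 and
# expected_width = int(18 * 640 / 480) = 24.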
@require_torch
@require_vision
class snake_case__(ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = ConditionalDetrImageProcessor if is_vision_available() else None
def snake_case ( self : str ):
lowercase__ : str = ConditionalDetrImageProcessingTester(self )
@property
def snake_case ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Optional[Any] ):
# Initialize image_processing
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : Any ):
# Initialize image_processing
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ : List[str] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : List[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self : List[str] ):
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowercase__ : Any = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase__ : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
lowercase__ : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self : int ):
# prepare image and target
lowercase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowercase__ : str = json.loads(f.read() )
lowercase__ : Dict = {"image_id": 39_769, "annotations": target}
# encode them
lowercase__ : Union[str, Any] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
lowercase__ : List[str] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
lowercase__ : Union[str, Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
lowercase__ : Any = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
lowercase__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
lowercase__ : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowercase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowercase__ : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify orig_size
lowercase__ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
lowercase__ : Optional[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
@slow
def snake_case ( self : Dict ):
# prepare image, target and masks_path
lowercase__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowercase__ : str = json.loads(f.read() )
lowercase__ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
lowercase__ : str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowercase__ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
lowercase__ : Dict = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
lowercase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
lowercase__ : Tuple = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , SCREAMING_SNAKE_CASE ) )
# verify boxes
lowercase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
lowercase__ : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
lowercase__ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
lowercase__ : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , SCREAMING_SNAKE_CASE ) )
# verify masks
lowercase__ : Union[str, Any] = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , SCREAMING_SNAKE_CASE )
# verify orig_size
lowercase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , SCREAMING_SNAKE_CASE ) )
# verify size
lowercase__ : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , SCREAMING_SNAKE_CASE ) )
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
| 81 | 0 |
class EditDistance:
    """Levenshtein (edit) distance between two strings, computed with both a
    memoized top-down DP and a bottom-up DP over the same table."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
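# Quick check of the class above: EditDistance().min_dist_bottom_up("intention",
# "execution") returns 5, the classic Levenshtein distance for this pair, and
# min_dist_top_down agrees.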
if __name__ == "__main__":
lowerCAmelCase__ = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
lowerCAmelCase__ = input('''Enter the first string: ''').strip()
lowerCAmelCase__ = input('''Enter the second string: ''').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 700 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(BaseImageProcessor ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
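# Worked example of the padding rule above (illustrative numbers): with
# old_height=13, old_width=16 and size=8, pad_height = (13 // 8 + 1) * 8 - 13 = 3
# and pad_width = (16 // 8 + 1) * 8 - 16 = 8, so a dimension that is already a
# multiple of `size` still gets one full extra block, as the formula is written.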
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 81 | 0 |
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operations.

    >>> add(5, 3)
    8
    """
    while second != 0:
        carry = first & second  # positions where both bits are 1 produce a carry
        first ^= second         # bitwise sum without the carries
        second = carry << 1     # carries move one position to the left
    return first
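
# Worked trace of the carry propagation for add(5, 3):
#   iteration 1: carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   iteration 2: carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   iteration 3: carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   iteration 4: carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0 << 1 = 0 -> returns 8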
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter the first number: ''').strip())
lowerCAmelCase__ = int(input('''Enter the second number: ''').strip())
print(f'''{add(first, second) = }''')
| 701 |
import argparse
import json
from tqdm import tqdm
def main( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case__(TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=None , **SCREAMING_SNAKE_CASE : List[str] ):
super().__init__(features=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = torch_tensor_kwargs
import torch # noqa import torch at initialization
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ):
import torch
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(SCREAMING_SNAKE_CASE )
return column
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
import torch
if isinstance(SCREAMING_SNAKE_CASE , (str, bytes, type(None )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowercase__ : Union[str, Any] = {}
if isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
lowercase__ : List[str] = {"dtype": torch.intaa}
elif isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowercase__ : Optional[Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
lowercase__ : Tuple = np.asarray(SCREAMING_SNAKE_CASE )
return torch.tensor(SCREAMING_SNAKE_CASE , **{**default_dtype, **self.torch_tensor_kwargs} )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
import torch
# support for torch, tf, jax etc.
if hasattr(SCREAMING_SNAKE_CASE , "__array__" ) and not isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
lowercase__ : List[str] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : dict ):
return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE , map_list=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : pa.Table ):
lowercase__ : Dict = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : pa.Table ):
lowercase__ : Optional[int] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE )
lowercase__ : str = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
lowercase__ : Optional[int] = self.recursive_tensorize(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self._consolidate(SCREAMING_SNAKE_CASE )
return column
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : pa.Table ):
lowercase__ : Tuple = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE )
for column_name in batch:
lowercase__ : List[Any] = self._consolidate(batch[column_name] )
return batch
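# Usage sketch (assuming this is the torch formatter used by `datasets`): once a
# dataset's format is set to "torch", row/column/batch access is routed through
# the recursive tensorize/consolidate methods above and values come back as
# torch tensors.
# ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
# ds[0]["x"]  # tensor([1, 2])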
| 702 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def parse_args( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def tokenize_function( tokenizer ):
"""simple docstring"""
def fn(examples ):
return tokenizer(examples["text"] )
return fn
def get_serialized_examples( tokenized_data ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
def main( args ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size (args.max_length here). To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(examples ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
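# Illustration (hypothetical sizes): with args.max_length = 4 and a batch whose
# concatenated input_ids hold 10 tokens, total_length is rounded down to 8 and the
# batch becomes two fixed-length samples, tokens [0:4] and [4:8]; the final two
# tokens are dropped.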
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
| 81 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCAmelCase__ = 1_2_8_0_2_2
lowerCAmelCase__ = 1_2_8_0_2_8
@require_sentencepiece
class snake_case__(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowercase_ = MaMaaaTokenizer
lowercase_ = False
lowercase_ = False
lowercase_ = True
def snake_case ( self : str ):
super().setUp()
lowercase__ : Union[str, Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Dict = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowercase__ : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any ):
return (
"This is a test",
"This is a test",
)
def snake_case ( self : Dict ):
lowercase__ : Any = "</s>"
lowercase__ : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Tuple = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def snake_case ( self : Any ):
pass
def snake_case ( self : Any ):
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
lowercase__ : List[str] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , "This is a test" )
@slow
def snake_case ( self : List[str] ):
# fmt: off
lowercase__ : List[str] = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
checkpoint_name = """facebook/m2m100_418M"""
src_text = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
tgt_text = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
expected_src_tokens = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def snake_case ( cls : Optional[int] ):
cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
cls.pad_token_id = 1
return cls
def snake_case ( self : Tuple ):
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 )
def snake_case ( self : int ):
vocab = self.tokenizer.get_vocab()
self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
def snake_case ( self : Optional[int] ):
self.tokenizer.src_lang = "en"
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def snake_case ( self : Optional[int] ):
self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_french )
self.assertNotIn(self.tokenizer.eos_token , result )
def snake_case ( self : Dict ):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.lang_token_to_id , original_special_tokens )
@require_torch
def snake_case ( self : Tuple ):
self.tokenizer.src_lang = "en"
self.tokenizer.tgt_lang = "fr"
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
batch["decoder_input_ids"] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def snake_case ( self : Tuple ):
self.tokenizer.src_lang = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer.src_lang = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
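# Illustrative note (added; not part of the original test): with src_lang set, M2M100
# encodes source text as [lang_code_id] + token_ids + [eos_token_id], so e.g. for "mr"
# self.tokenizer("hello").input_ids starts with self.tokenizer.get_lang_id("mr") and
# ends with self.tokenizer.eos_token_id.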
@require_torch
def snake_case ( self : str ):
self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
self.tokenizer.tgt_lang = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def snake_case ( self : Union[str, Any] ):
inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[128_022, 58, 4_183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128_006,
} , )
| 703 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_labels = num_labels
self.initializer_range = initializer_range
self.out_features = out_features
self.out_indices = out_indices
self.scope = scope
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def create_and_check_model( self , config , pixel_values , labels ):
model = ConvNextVaModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels ):
model = ConvNextVaForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_backbone( self , config , pixel_values , labels ):
model = ConvNextVaBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
config.out_features = None
model = ConvNextVaBackbone(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
def prepare_config_and_inputs_with_labels( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def snake_case ( self : List[Any] ):
self.model_tester = ConvNextVaModelTester(self )
self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.return_dict = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
config.use_cache = False
config.return_dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config )
model.to(torch_device )
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def snake_case ( self : int ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def snake_case ( self : Dict ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case ( self : str ):
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def snake_case ( self : Any ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvNextVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
"""simple docstring"""
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device )
preprocessor = self.default_image_processor
image = prepare_img()
inputs = preprocessor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 81 | 0 |
'''simple docstring'''
def valid_coloring( neighbours , colored_vertices , color ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ):
"""simple docstring"""
if index == len(graph ):
return True
# Recursive Step
for i in range(max_colors ):
if valid_coloring(graph[index] , colored_vertices , i ):
# Color current vertex
colored_vertices[index] = i
# Validate coloring
if util_color(graph , max_colors , colored_vertices , index + 1 ):
return True
# Backtrack
colored_vertices[index] = -1
return False
def color( graph , max_colors ):
"""simple docstring"""
colored_vertices = [-1] * len(graph )
if util_color(graph , max_colors , colored_vertices , 0 ):
return colored_vertices
return []
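# Hedged usage sketch (added; not part of the original module). color() takes an
# adjacency matrix and a color budget and returns one valid assignment, or [] if
# none exists:
# graph = [
#     [0, 1, 0, 0, 0],
#     [1, 0, 1, 0, 1],
#     [0, 1, 0, 1, 0],
#     [0, 0, 1, 0, 1],
#     [0, 1, 0, 1, 0],
# ]
# print(color(graph, 3))  # [0, 1, 0, 1, 0] -- adjacent vertices never share a color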
| 704 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
bert2bert.config.eos_token_id = tokenizer.sep_token_id
bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
bert2bert.config.max_length = 128
train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
train_dataset = train_dataset.select(range(32 ) )
val_dataset = val_dataset.select(range(16 ) )
batch_size = 4
def _map_to_encoder_decoder_inputs(batch ):
# Tokenizer will automatically set [BOS] <text> [EOS]
inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
batch["input_ids"] = inputs.input_ids
batch["attention_mask"] = inputs.attention_mask
batch["decoder_input_ids"] = outputs.input_ids
batch["labels"] = outputs.input_ids.copy()
batch["labels"] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
batch["decoder_attention_mask"] = outputs.attention_mask
assert all(len(x ) == 512 for x in inputs.input_ids )
assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(pred ):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
train_dataset = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
val_dataset = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
output_dir = self.get_auto_remove_tmp_dir()
training_args = Seq2SeqTrainingArguments(
output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
trainer = Seq2SeqTrainer(
model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
| 81 | 0 |
import unittest
from knapsack import knapsack as k
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : List[Any] ):
cap = 0
val = [0]
w = [0]
c = len(val )
self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
val = [60]
w = [10]
c = len(val )
self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
def snake_case ( self : Union[str, Any] ):
cap = 3
val = [1, 2, 3]
w = [3, 2, 1]
c = len(val )
self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
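# Worked check for the case above (added note, not in the original test): with
# capacity 3, values [1, 2, 3] and weights [3, 2, 1], the best pick is the third
# item (w=1, v=3) plus the second (w=2, v=2): total weight 3, total value 5.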
def snake_case ( self : Tuple ):
cap = 50
val = [60, 100, 120]
w = [10, 20, 30]
c = len(val )
self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
unittest.main()
| 705 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config( yolos_name ):
"""simple docstring"""
config = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
config.image_size = [800, 1_333]
config.use_mid_position_embeddings = False
elif yolos_name == "yolos_s_dWr":
config.hidden_size = 330
config.num_hidden_layers = 14
config.num_attention_heads = 6
config.intermediate_size = 1_320
elif "yolos_s" in yolos_name:
config.hidden_size = 384
config.intermediate_size = 1_536
config.num_hidden_layers = 12
config.num_attention_heads = 6
elif "yolos_b" in yolos_name:
config.image_size = [800, 1_344]
config.num_labels = 91
repo_id = "huggingface/label-files"
filename = "coco-detection-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
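# Hedged example (added; not in the original script): get_yolos_config("yolos_ti")
# should return a YolosConfig with hidden_size=192, num_attention_heads=3 and the
# 91-label COCO id2label mapping, ready to instantiate YolosForObjectDetection.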
def read_in_q_k_v( state_dict , config , base_model=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
state_dict[F"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
state_dict[F"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
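# Minimal sketch of the fused-QKV split performed above (added for illustration; the
# variable names below are hypothetical): a (3*hidden, hidden) qkv matrix is cut into
# three (hidden, hidden) blocks along dim 0.
# import torch
# hidden = 4
# qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
# q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
# assert torch.equal(torch.cat([q, k, v], dim=0), qkv)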
def rename_key( name ):
"""simple docstring"""
if "backbone" in name:
name = name.replace("backbone" , "vit" )
if "cls_token" in name:
name = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
name = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
name = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
name = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
name = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
name = name.replace("attn" , "attention.self" )
if "norm1" in name:
name = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
name = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
name = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
name = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
name = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
name = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
name = name.replace("vit.norm" , "vit.layernorm" )
return name
def convert_state_dict( orig_state_dict , model ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split("." )
layer_num = int(key_split[2] )
dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
dim : dim * 2, :
]
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
else:
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
orig_state_dict[F"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def prepare_img( ):
"""simple docstring"""
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
"""simple docstring"""
config = get_yolos_config(yolos_name )
# load original state_dict
state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
# load 🤗 model
model = YolosForObjectDetection(config )
model.eval()
new_state_dict = convert_state_dict(state_dict , model )
model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by YolosImageProcessor
size = 800 if yolos_name != "yolos_ti" else 512
image_processor = YolosImageProcessor(format="coco_detection" , size=size )
encoding = image_processor(images=prepare_img() , return_tensors="pt" )
outputs = model(**encoding )
logits , pred_boxes = outputs.logits, outputs.pred_boxes
expected_slice_logits , expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
expected_slice_logits = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
expected_slice_boxes = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
expected_slice_logits = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
expected_slice_boxes = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
expected_slice_logits = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
expected_slice_boxes = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
expected_slice_logits = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
expected_slice_boxes = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
expected_slice_logits = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
expected_slice_boxes = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model_mapping = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
model_name = model_mapping[yolos_name]
image_processor.push_to_hub(model_name , organization="hustvl" )
model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 0 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
"""simple docstring"""
def __init__( self , img , dst_width : int , dst_height : int ):
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0" )
self.img = img
self.src_w = img.shape[1]
self.src_h = img.shape[0]
self.dst_w = dst_width
self.dst_h = dst_height
self.ratio_x = self.src_w / self.dst_w
self.ratio_y = self.src_h / self.dst_h
self.output = (
np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
)
def process( self ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
def get_x( self , x : int ):
return int(self.ratio_x * x )
def get_y( self , y : int ):
return int(self.ratio_y * y )
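# Added note (not in the original file): each destination pixel (j, i) maps back to
# source pixel (int(j * src_w / dst_w), int(i * src_h / dst_h)); e.g. ratio_x = 2.0
# sends destination x=10 to source x=20 -- plain nearest-neighbour sampling.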
if __name__ == "__main__":
dst_w , dst_h = 8_0_0, 6_0_0
im = imread('''image_data/lena.jpg''', 1)
n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 706 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
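# Added note (not in the original file): with the _LazyModule above, a statement like
# `from transformers.models.mgp_str import MgpstrProcessor` only imports the
# processing_mgp_str submodule the first time the attribute is actually accessed.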
| 81 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig ):
"""simple docstring"""
model_type = """codegen"""
attribute_map = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast ):
"""simple docstring"""
def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
if not getattr(self._config , "pad_token_id" , None ):
# TODO: how to do that better?
self._config.pad_token_id = 0
@property
def inputs( self ):
common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(common_inputs , direction="inputs" )
common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
else:
common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def num_layers( self ):
return self._config.n_layer
@property
def num_attention_heads( self ):
return self._config.n_head
def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
batch , seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
past_shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
ordered_inputs["past_key_values"] = [
(torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
]
ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
if self.use_past:
mask_dtype = ordered_inputs["attention_mask"].dtype
ordered_inputs["attention_mask"] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
def default_onnx_opset( self ):
return 13
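# Hedged usage sketch (added; not in the original file), assuming a GPT-2-style
# tokenizer instance is available:
# config = CodeGenConfig()
# onnx_config = CodeGenOnnxConfig(config, use_past=True)
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# dummy keeps the forward() argument order: input_ids, past_key_values, attention_mask.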
| 707 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(image_processor_map , fp )
def get_tokenizer( self , **kwargs ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_rust_tokenizer( self , **kwargs ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def get_image_processor( self , **kwargs ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def prepare_image_inputs( self ):
image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
processor_slow.save_pretrained(self.tmpdirname )
processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
processor_fast.save_pretrained(self.tmpdirname )
processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
def snake_case ( self : List[str] ):
processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
processor = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
def snake_case ( self : str ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input , return_tensors="np" )
input_processor = processor(images=image_input , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def snake_case ( self : Optional[Any] ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
def snake_case ( self : str ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 5_1_2,
'''roberta-large''': 5_1_2,
'''roberta-large-mnli''': 5_1_2,
'''distilroberta-base''': 5_1_2,
'''roberta-base-openai-detector''': 5_1_2,
'''roberta-large-openai-detector''': 5_1_2,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
slow_tokenizer_class = RobertaTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
pre_tok_state["add_prefix_space"] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
tokenizer_component = "post_processor"
tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
state["sep"] = tuple(state["sep"] )
if "cls" in state:
state["cls"] = tuple(state["cls"] )
changes_to_apply = False
if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
state["add_prefix_space"] = add_prefix_space
changes_to_apply = True
if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
state["trim_offsets"] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors , state.pop("type" ) )
new_value = component_class(**state )
setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ):
is_split_into_words = kwargs.get("is_split_into_words" , False )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
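    # Hedged sketch of the contract enforced by the two overrides above
    # (checkpoint name is an assumption): passing pre-tokenized input requires
    # the tokenizer to have been built with add_prefix_space=True.
    #
    #   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
    #   enc = tok(["Hello", "world"], is_split_into_words=True)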
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
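    # Hedged illustration of the special-token layout produced by the two methods
    # above, assuming the usual RoBERTa bos/eos ids (0 and 2):
    #
    #   build_inputs_with_special_tokens([10, 11], [20, 21])
    #   # -> [0, 10, 11, 2, 2, 20, 21, 2]    i.e. <s> A </s></s> B </s>
    #   create_token_type_ids_from_sequences([10, 11], [20, 21])
    #   # -> [0, 0, 0, 0, 0, 0, 0, 0]        RoBERTa does not use token type ids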
| 708 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM

@require_torch
class StreamerTester(unittest.TestCase):
    """simple docstring"""
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
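    # Hedged application sketch (not part of the test): outside of tests,
    # TextStreamer prints tokens to stdout as they are generated, no thread needed:
    #
    #   model.generate(input_ids, max_new_tokens=50,
    #                  streamer=TextStreamer(tokenizer, skip_prompt=True))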
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
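    # Hedged application sketch of the thread + iterator pattern exercised above
    # (model/tokenizer/input_ids are assumed to exist as in the test):
    #
    #   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    #   Thread(target=model.generate,
    #          kwargs={"input_ids": input_ids, "max_new_tokens": 50, "streamer": streamer}).start()
    #   for chunk in streamer:               # blocks until the next decoded chunk arrives
    #       print(chunk, end="", flush=True)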
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
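    # Editorial note (an assumption about the internals, not asserted by the test):
    # TextIteratorStreamer is backed by a queue, so with a `timeout` a stalled
    # producer surfaces as queue.Empty from the iterator instead of blocking the
    # consuming thread forever.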
| 81 | 0 |