"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__SCREAMING_SNAKE_CASE : str = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # force the last token of every sequence to be the EOS token (id 2)
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the attention mask up to the maximum cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
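
    # For reference only — a minimal NumPy sketch of the behavior exercised above.
    # This mirrors what `shift_tokens_right` is expected to do (shift every sequence
    # one position to the right and fill the first slot with the decoder start token);
    # it is an illustration, not the library implementation:
    #
    #     def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    #         shifted = np.zeros_like(input_ids)
    #         shifted[:, 1:] = input_ids[:, :-1]
    #         shifted[:, 0] = decoder_start_token_id
    #         return np.where(shifted == -100, pad_token_id, shifted)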
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
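
# Migration sketch (illustrative only; the variable names below are assumptions):
# any script that constructed a SageMakerTrainer can switch to the plain Trainer
# with the same keyword arguments, e.g.
#   trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)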
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__a )
class lowerCamelCase_( __a ):
'''simple docstring'''
def __init__( self , **lowerCamelCase__ ):
super().__init__(**snake_case__ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(snake_case__ )
def snake_case__ ( self , **lowerCamelCase__ ):
_lowerCamelCase = {}
_lowerCamelCase = {}
_lowerCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
_lowerCamelCase = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
_lowerCamelCase = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
_lowerCamelCase = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
_lowerCamelCase = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
_lowerCamelCase = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
_lowerCamelCase = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
_lowerCamelCase = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
_lowerCamelCase = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
_lowerCamelCase = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
_lowerCamelCase = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
_lowerCamelCase = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
_lowerCamelCase = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
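
    # Illustrative usage sketch (not part of this module; the model id and file
    # path below are examples, not values taken from this file):
    #   from transformers import pipeline
    #   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
    #   outputs = generator("image.png", pred_iou_thresh=0.9)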
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        # yield the point grid in batches so the model never sees more than
        # `points_per_batch` prompts at a time
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
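
# Illustrative note (not part of the original tests): BigBird's sparse attention,
# exercised above via attention_type="block_sparse", can be switched to dense
# attention through the config, e.g.
#   config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")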
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = '''▁'''
__SCREAMING_SNAKE_CASE : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__SCREAMING_SNAKE_CASE : int = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
__SCREAMING_SNAKE_CASE : Tuple = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__SCREAMING_SNAKE_CASE : List[Any] = {'''mustc''': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
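
    # Illustrative example (assumes lang_codes="mustc" and tgt_lang="fr"; not part
    # of the original file): the sequence built above would be
    #   [<lang:fr>] + token_ids_0 + [</s>]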
    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
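
# Illustrative usage sketch (checkpoint is the one referenced in the vocab map above;
# the sentence is a placeholder):
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids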
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
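
# Illustrative usage sketch (the checkpoint name and init_image are assumptions,
# not values taken from this file):
#   pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
#   image = pipe(prompt="a photo of a cat", image=init_image, strength=0.75).images[0]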
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger()
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] = True ) -> List[str]:
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
_lowerCamelCase = timm.create_model('''levit_128s''' , pretrained=_snake_case )
else:
_lowerCamelCase = timm.create_model('''levit_128''' , pretrained=_snake_case )
if hidden_sizes == 1_92:
_lowerCamelCase = timm.create_model('''levit_192''' , pretrained=_snake_case )
if hidden_sizes == 2_56:
_lowerCamelCase = timm.create_model('''levit_256''' , pretrained=_snake_case )
if hidden_sizes == 3_84:
_lowerCamelCase = timm.create_model('''levit_384''' , pretrained=_snake_case )
from_model.eval()
_lowerCamelCase = LevitForImageClassificationWithTeacher(_snake_case ).eval()
_lowerCamelCase = OrderedDict()
_lowerCamelCase = from_model.state_dict()
_lowerCamelCase = list(from_model.state_dict().keys() )
_lowerCamelCase = list(our_model.state_dict().keys() )
print(len(_snake_case ) , len(_snake_case ) )
for i in range(len(_snake_case ) ):
_lowerCamelCase = weights[og_keys[i]]
our_model.load_state_dict(_snake_case )
_lowerCamelCase = torch.randn((2, 3, 2_24, 2_24) )
_lowerCamelCase = from_model(_snake_case )
_lowerCamelCase = our_model(_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case ), "The model logits don't match the original one."
_lowerCamelCase = name
print(_snake_case )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_lowerCamelCase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
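
# Example invocation (illustrative; the script filename and paths are placeholders):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub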
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
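
# With the lazy module installed above, importing the package is cheap: names such
# as `XLMModel` are only materialized on first attribute access, e.g.
#   from transformers.models.xlm import XLMModel  # triggers the real import lazily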
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCamelCase_( lowerCAmelCase__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
lowercase__ : List[Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowercase__ : Tuple = "document_qa"
lowercase__ : Union[str, Any] = AutoProcessor
lowercase__ : str = VisionEncoderDecoderModel
lowercase__ : List[str] = ["image", "text"]
lowercase__ : Tuple = ["text"]
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_lowerCamelCase = task_prompt.replace('''{user_input}''' , _SCREAMING_SNAKE_CASE )
_lowerCamelCase = self.pre_processor.tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids
_lowerCamelCase = self.pre_processor(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def snake_case__ ( self , lowerCamelCase__ ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_SCREAMING_SNAKE_CASE , ).sequences
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE )[0]
_lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
_lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
_lowerCamelCase = re.sub(R'''<.*?>''' , '''''' , _SCREAMING_SNAKE_CASE , count=1 ).strip() # remove first task start token
_lowerCamelCase = self.pre_processor.tokenajson(_SCREAMING_SNAKE_CASE )
return sequence["answer"]
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
np_speech_inputs = np.asarray(speech_inputs )
encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
# Test truncation required
speech_inputs = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='''np''' ).input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
pt_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
def snake_case__ ( self ):
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
audio = self._load_datasamples(1 )[0]
audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
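# A minimal end-to-end usage sketch for the feature extractor exercised above
# (illustrative only; the checkpoint name and the silent input are assumptions,
# not part of this test file):
# from transformers import WhisperFeatureExtractor
# import numpy as np
# fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
# silence = np.zeros(fe.n_samples, dtype=np.float32)  # 30 s of audio at 16 kHz
# feats = fe(silence, sampling_rate=16_000, return_tensors="np").input_features
# print(feats.shape)  # (1, 80, 3000): 80 mel bins x 3000 frames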
| 623 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vit-base-patch16-224': 'https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig( PretrainedConfig ):
'''simple docstring'''
model_type = 'vit'
def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , encoder_stride=1_6 , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class ViTOnnxConfig( OnnxConfig ):
'''simple docstring'''
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def atol_for_validation( self ):
return 1e-4
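# Quick illustrative check for the two classes above (a sketch, not part of the
# library; instantiation mirrors how transformers pairs a model config with its
# ONNX export config):
# config = ViTConfig()
# onnx_config = ViTOnnxConfig(config)
# assert "pixel_values" in onnx_config.inputs
# assert onnx_config.atol_for_validation == 1e-4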
| 710 |
"""simple docstring"""
def abbr( a : str , b : str ) -> bool:
n = len(a )
m = len(b )
dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
dp[0][0] = True
for i in range(n ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
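# Worked example for the DP above (illustrative strings): can "daBcd" be turned
# into "ABC" by upper-casing some lowercase letters and deleting the rest?
if __name__ == "__main__":
    assert abbr("daBcd", "ABC") is True  # d->drop, a->A, B stays, c->C, d->drop
    assert abbr("dBcd", "ABC") is False  # no 'a' available to produce the 'A'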
| 623 | 0 |
"""simple docstring"""
def alternative_string_arrange( first_str : str , second_str : str ) -> str:
first_str_length = len(first_str )
second_str_length = len(second_str )
output_list_length = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list = []
for char_count in range(output_list_length ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 711 |
"""simple docstring"""
import numpy as np
def sigmoid( vector : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit( vector : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
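# Worked check (a sketch): gaussian_error_linear_unit above is the sigmoid
# approximation of GELU, x * sigmoid(1.702 * x). At x = 1 this gives
# 1 / (1 + e**-1.702) ~= 0.846, close to the exact GELU(1) ~= 0.841.
if __name__ == "__main__":
    print(sigmoid(np.array([0.0])))                     # [0.5]
    print(gaussian_error_linear_unit(np.array([1.0])))  # ~[0.8458]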
| 623 | 0 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features : Features ) -> Optional[int]:
batch_size = np.inf
def set_batch_size(feature : FeatureType ) -> None:
nonlocal batch_size
if isinstance(feature , Image ):
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(feature , Audio ):
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(feature , Value ) and feature.dtype == "binary":
batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(features , set_batch_size )
return None if batch_size is np.inf else batch_size
class ParquetDatasetReader( AbstractDatasetReader ):
'''simple docstring'''
def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
super().__init__(
path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
self.builder = Parquet(
cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
def read( self ):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
dataset = self.builder.as_dataset(
split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
'''simple docstring'''
def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size or get_writer_batch_size(dataset.features )
self.parquet_writer_kwargs = parquet_writer_kwargs
def write( self ):
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
else:
written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
return written
def _write( self , file_obj , batch_size , **parquet_writer_kwargs ):
written = 0
_ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
batch = query_table(
table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(batch )
written += batch.nbytes
writer.close()
return written
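# Minimal round-trip sketch for the two classes above (illustrative only; the
# file path and the toy dataset are assumptions, not part of this module):
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# ParquetDatasetWriter(ds, "toy.parquet").write()                  # Arrow -> Parquet
# ds2 = ParquetDatasetReader("toy.parquet", split="train").read()  # Parquet -> Dataset
# assert ds2[0]["text"] == "a"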
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
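# How the pattern above behaves at runtime (a generic sketch of the mechanism,
# not transformers' exact implementation): attribute access on the module is
# routed through __getattr__, so a heavy backend is imported only on first use.
# import importlib, types
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#     def __getattr__(self, attr):  # runs only when `attr` is first requested
#         for submodule, names in self._import_structure.items():
#             if attr in names:
#                 module = importlib.import_module("." + submodule, self.__name__)
#                 return getattr(module, attr)
#         raise AttributeError(attr)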
| 623 | 0 |
"""simple docstring"""
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages | 713 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
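# Worked example: a 3 ohm resistance with a 4 ohm reactance gives
# |Z| = sqrt(3**2 + 4**2) = 5 ohm, and any one missing quantity is recovered.
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}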
| 623 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
def snake_case__ ( self ):
bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
def snake_case__ ( self ):
bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping )
| 714 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence : list[Any] ) -> None:
create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None:
if index == len(sequence ):
print(current_subsequence )
return
create_state_space_tree(sequence , current_subsequence , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(sequence , current_subsequence , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
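# For reference, a small trace of the backtracking above: the "exclude" branch
# is explored before the "include" branch, so for [1, 2] the printed order is
# [], [2], [1], [1, 2] -- each of the 2**n subsequences exactly once.
if __name__ == "__main__":
    generate_all_subsequences([1, 2])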
| 623 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name : str ) -> ASTConfig:
config = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
config.max_length = 1_28
elif "12-12" in model_name:
config.time_stride = 12
config.frequency_stride = 12
elif "14-14" in model_name:
config.time_stride = 14
config.frequency_stride = 14
elif "16-16" in model_name:
config.time_stride = 16
config.frequency_stride = 16
else:
raise ValueError('''Model not supported''' )
repo_id = "huggingface/label-files"
if "speech-commands" in model_name:
config.num_labels = 35
filename = "speech-commands-v2-id2label.json"
else:
config.num_labels = 5_27
filename = "audioset-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key( name : str ) -> str:
if "module.v" in name:
name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
name = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
name = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
name = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
name = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
name = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
name = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def convert_state_dict( orig_state_dict : dict , config : ASTConfig ) -> dict:
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
# timm fuses the query, key and value projections into one matrix; split it
key_split = key.split('''.''' )
layer_num = int(key_split[3] )
dim = config.hidden_size
prefix = F"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention."""
if "weight" in key:
orig_state_dict[prefix + "query.weight"] = val[:dim, :]
orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
else:
orig_state_dict[prefix + "query.bias"] = val[:dim]
orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
orig_state_dict[prefix + "value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
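# What the qkv branch above does, in isolation (a self-contained sketch with a
# toy hidden size; the fused tensor is illustrative, not a checkpoint weight):
# a fused (3*dim, dim) matrix becomes three separate (dim, dim) projections.
def _demo_split_fused_qkv(dim: int = 4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)
    return query, key, value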
def remove_keys( state_dict : dict ) -> None:
ignore_keys = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name : str , pytorch_dump_folder_path : str , push_to_hub : bool=False ) -> None:
config = get_audio_spectrogram_transformer_config(model_name )
model_name_to_url = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
checkpoint_url = model_name_to_url[model_name]
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
# remove some keys
remove_keys(state_dict )
# rename some keys
new_state_dict = convert_state_dict(state_dict , config )
# load 🤗 model
model = ASTForAudioClassification(config )
model.eval()
model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
mean = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
std = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
max_length = 10_24 if "speech-commands" not in model_name else 1_28
feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
_lowerCamelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
_lowerCamelCase = dataset[0]["audio"]["array"]
else:
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
_lowerCamelCase = torchaudio.load(lowerCamelCase__ )
_lowerCamelCase = waveform.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=1_60_00 , return_tensors='''pt''' )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ )
_lowerCamelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_lowerCamelCase = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_lowerCamelCase = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_lowerCamelCase = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_lowerCamelCase = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_lowerCamelCase = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_lowerCamelCase = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
_lowerCamelCase = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 715 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', FutureWarning, )
| 623 | 0 |
"""simple docstring"""
def check_cycle( graph : dict ) -> bool:
visited = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
rec_stk = set()
return any(
node not in visited and depth_first_search(graph , node , visited , rec_stk )
for node in graph )
def depth_first_search( graph : dict , vertex : int , visited : set , rec_stk : set ) -> bool:
visited.add(vertex )
rec_stk.add(vertex )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(graph , node , visited , rec_stk ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(vertex )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
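# Usage sketch for the two functions above (toy adjacency lists, illustrative):
if __name__ == "__main__":
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True: 0 -> 1 -> 2 -> 0 is a back edge
    print(check_cycle({0: [1], 1: [2], 2: []}))   # False: a DAG has no cycle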
| 716 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path : str , metadata_path : str , entity_vocab_path : str , pytorch_dump_folder_path : str , model_size : str ) -> None:
# Load configuration defined in the metadata file
with open(metadata_path ) as metadata_file:
metadata = json.load(metadata_file )
config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
# Load the entity vocab file
entity_vocab = load_entity_vocab(entity_vocab_path )
tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
entity_token_1 = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
entity_token_2 = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(pytorch_dump_folder_path )
with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(entity_vocab , f )
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
word_emb = state_dict['''embeddings.word_embeddings.weight''']
ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
prefix = F"""encoder.layer.{layer_index}.attention.self."""
state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]
model = LukeModel(config=config ).eval()
missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
text = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
span = (39, 42)
encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path : str ) -> dict:
entity_vocab = {}
with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(f ):
title, _ = line.rstrip().split('''\t''' )
entity_vocab[title] = index
return entity_vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_encoder_decoder'''] = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_encoder_decoder'''] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_encoder_decoder'''] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.mask_ratio = mask_ratio
self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def create_and_check_model( self , config , pixel_values , labels ):
model = TFViTMAEModel(config=config )
result = model(pixel_values , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining( self , config , pixel_values , labels ):
model = TFViTMAEForPreTraining(config )
result = model(pixel_values , training=False )
# expected sequence length = num_patches
num_patches = (self.image_size // self.patch_size) ** 2
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
config.num_channels = 1
model = TFViTMAEForPreTraining(config )
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values , training=False )
expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, pixel_values, labels) = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
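# The seq_length arithmetic above, spelled out (a sketch of the default tester
# values only): image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches;
# with the [CLS] token that is 226 positions, and keeping a (1 - 0.6) fraction
# after random masking leaves ceil(0.4 * 226) = 91 visible positions.
# num_patches = (30 // 2) ** 2                                # 225
# seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))  # 91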
@require_tf
class TFViTMAEModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
test_pruning = False
test_onnx = False
test_resize_embeddings = False
test_head_masking = False
def snake_case__ ( self ):
self.model_tester = TFViTMAEModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ):
# make masks reproducible
np.random.seed(2 )
num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
tf_noise = tf.constant(noise )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
tf_inputs_dict['''noise'''] = tf_noise
super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
model = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(model )
def lowerCAmelCase_( ) -> List[Any]:
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
model = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
outputs = model(**inputs , noise=noise )
# verify the logits
expected_shape = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
| 623 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
'''simple docstring'''
def __init__( self , id_ ):
self.id = str(id_ )
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__( self , other ):
return self.key < other.key
def __repr__( self ):
return self.id
def add_neighbor( self , vertex ):
self.neighbors.append(vertex )
def add_edge( self , vertex , weight ):
self.edges[vertex.id] = weight
def connect( graph , a , b , edge ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , edge )
graph[b - 1].add_edge(graph[a - 1] , edge )
def prim( graph : list , root : Vertex ) -> list:
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q )
q.remove(u )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1 , len(graph ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def prim_heap( graph : list , root : Vertex ) -> Iterator[tuple]:
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph )
hq.heapify(h )
while h:
u = hq.heappop(h )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h )
for i in range(1 , len(graph ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCAmelCase_( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
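# Usage sketch for the functions above (a toy triangle graph; the weights are
# illustrative): Prim keeps the two cheapest edges of the triangle.
def _demo_prim() -> None:
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)  # edge 1-2, weight 1
    connect(graph, 2, 3, 2)  # edge 2-3, weight 2
    connect(graph, 1, 3, 3)  # edge 1-3, weight 3
    print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]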
| 718 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
product_title = item.h2.text
product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
product_rating = '''Not available'''
try:
product_mrp = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
product_mrp = ''''''
try:
discount = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
discount = float('''nan''' )
except AttributeError:
pass
data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
data_frame.loc[
data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''] , '''MRP of the product'''
] = ''' '''
data_frame.loc[
data_frame['''Current Price of the product'''] > data_frame['''MRP of the product'''] , '''Current Price of the product'''
] = ''' '''
data_frame.index += 1
return data_frame
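# The discount computed above, in isolation (a sketch with illustrative prices):
# discount% = (MRP - price) / MRP * 100.
def _demo_discount(mrp: float = 1_000.0, price: float = 750.0) -> float:
    return (mrp - price) / mrp * 1_00  # 25.0 percent off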
if __name__ == "__main__":
product = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_git'''] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
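# Simplified sketch of the lazy-import mechanism `_LazyModule` supplies above:
# attribute access triggers the real submodule import, so importing the package
# stays cheap. This is an illustrative re-implementation, not the actual
# transformers class.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)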
| 719 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
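# Standalone sketch of the packing scheme __iter__ implements above: tokenized
# texts are joined with a separator id and emitted as fixed-size chunks, with the
# ragged tail dropped. The helper name is illustrative.
def pack_token_ids(tokenized_inputs, seq_length, concat_token_id):
    all_token_ids = []
    for token_ids in tokenized_inputs:
        all_token_ids.extend(token_ids + [concat_token_id])
    for i in range(0, len(all_token_ids), seq_length):
        chunk = all_token_ids[i : i + seq_length]
        if len(chunk) == seq_length:
            yield chunk

# list(pack_token_ids([[1, 2], [3, 4, 5]], 3, 0)) -> [[1, 2, 0], [3, 4, 5]]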
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 0 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = BioGptModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# create attention mask
_lowerCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = self.seq_length // 2
_lowerCamelCase = 0
# first forward pass
_lowerCamelCase , _lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple()
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_lowerCamelCase = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1
_lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_lowerCamelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , )
# get two different outputs
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
_lowerCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
# first forward pass
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase , _lowerCamelCase = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
'''last_hidden_state'''
]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , lowerCamelCase__=False ):
_lowerCamelCase = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__ ( self , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = BioGptModel(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
(
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
_lowerCamelCase,
) = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase__ : Tuple = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = BioGptModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_lowerCamelCase = '''left'''
# Define PAD token = EOS token
_lowerCamelCase = tokenizer.eos_token
_lowerCamelCase = model.config.eos_token_id
# use different length sentences to test batching
_lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = inputs['''input_ids'''].to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model.generate(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) , )
_lowerCamelCase = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model.generate(input_ids=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_lowerCamelCase = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings )
_lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = 3
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = 3
_lowerCamelCase = '''multi_label_classification'''
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
_lowerCamelCase = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
_lowerCamelCase = model(SCREAMING_SNAKE_CASE_ )[0]
_lowerCamelCase = 4_2_3_8_4
_lowerCamelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def snake_case__ ( self ):
_lowerCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_lowerCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
_lowerCamelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = model.generate(
**SCREAMING_SNAKE_CASE_ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , )
_lowerCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
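# Conceptual sketch of the key/value-cache consistency checks in the tests above:
# a forward pass over the full sequence must match an incremental pass that reuses
# past_key_values. `model` stands for any Hugging Face decoder with caching; this
# helper is illustrative and not part of the test suite.
def kv_cache_matches(model, input_ids, next_tokens, atol=1e-3):
    import torch  # local import mirrors the guarded torch import at the top of this file

    full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    cache = model(input_ids, use_cache=True).past_key_values
    incremental = model(next_tokens, past_key_values=cache).last_hidden_state
    return torch.allclose(full[:, -next_tokens.shape[1] :], incremental, atol=atol)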
| 720 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Convergence is reached when we exceed max_iterations
# or when the change from one iteration to the next becomes small.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find the Rayleigh quotient
# (faster than usual because we know the vector is already normalized)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
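# Standalone numeric sketch of the update loop above on a small symmetric matrix;
# the values are illustrative and independent of the test function that follows.
def power_iteration_sketch() -> float:
    a = np.array([[41.0, 4.0, 20.0], [4.0, 26.0, 30.0], [20.0, 30.0, 50.0]])
    v = np.array([41.0, 4.0, 20.0])
    for _ in range(100):
        w = np.dot(a, v)
        v = w / np.linalg.norm(w)
    # The Rayleigh quotient of the normalized vector approximates the dominant eigenvalue.
    return float(np.dot(v.T, np.dot(a, v)))

# power_iteration_sketch() is close to the largest eigenvalue that
# np.linalg.eigh would return for the same matrix.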
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complex128 )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh is used for symmetric or Hermitian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take element-wise absolute values of each eigenvector,
# as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 0 |
"""simple docstring"""
class lowerCamelCase_: # Public class to implement a graph
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = row
_lowerCamelCase = col
_lowerCamelCase = graph
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# Checking all 8 elements surrounding nth element
_lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
_lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , snake_case_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , snake_case_ )
def snake_case__ ( self ): # And finally, count all islands.
_lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
_lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(snake_case_ , snake_case_ , snake_case_ )
count += 1
return count
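# Compact standalone sketch of the same island count using an explicit stack
# instead of recursion (1 = land, 0 = water, 8-connectivity); names are illustrative.
def count_islands_iterative(grid: list) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for si in range(rows):
        for sj in range(cols):
            if grid[si][sj] == 1 and not seen[si][sj]:
                count += 1
                seen[si][sj] = True
                stack = [(si, sj)]
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] == 1 and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count

# count_islands_iterative([[1, 1, 0], [0, 0, 0], [0, 0, 1]]) -> 2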
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : List[str] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowerCAmelCase_( lowercase_ : list[list[int]] , lowercase_ : list[int] , lowercase_ : list[int] , lowercase_ : int , lowercase_ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
_lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowercase_ ) )
] # the reference grid
_lowerCamelCase = 1
_lowerCamelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(lowercase_ ) )
] # the action grid
_lowerCamelCase = init[0]
_lowerCamelCase = init[1]
_lowerCamelCase = 0
_lowerCamelCase = g + heuristic[x][y] # cost from starting cell to destination cell
_lowerCamelCase = [[f, g, x, y]]
_lowerCamelCase = False # flag that is set when search is complete
_lowerCamelCase = False # flag set if we can't find expand
while not found and not resign:
if len(lowercase_ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
_lowerCamelCase = cell.pop()
_lowerCamelCase = next_cell[2]
_lowerCamelCase = next_cell[3]
_lowerCamelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
_lowerCamelCase = True
else:
for i in range(len(lowercase_ ) ): # to try out different valid actions
_lowerCamelCase = x + DIRECTIONS[i][0]
_lowerCamelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(lowercase_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_lowerCamelCase = g + cost
_lowerCamelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_lowerCamelCase = 1
_lowerCamelCase = i
_lowerCamelCase = []
_lowerCamelCase = goal[0]
_lowerCamelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_lowerCamelCase = x - DIRECTIONS[action[x][y]][0]
_lowerCamelCase = y - DIRECTIONS[action[x][y]][1]
_lowerCamelCase = xa
_lowerCamelCase = ya
invpath.append([x, y] )
_lowerCamelCase = []
for i in range(len(lowercase_ ) ):
path.append(invpath[len(lowercase_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__SCREAMING_SNAKE_CASE : str = [0, 0]
# all coordinates are given in format [y,x]
__SCREAMING_SNAKE_CASE : Dict = [len(grid) - 1, len(grid[0]) - 1]
__SCREAMING_SNAKE_CASE : int = 1
# the cost map which pushes the path closer to the goal
__SCREAMING_SNAKE_CASE : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__SCREAMING_SNAKE_CASE : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__SCREAMING_SNAKE_CASE : List[str] = 9_9
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
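# Standalone sketch of the heuristic map built in the __main__ block above:
# Manhattan distance to the goal, with obstacle cells pinned to a large penalty
# so the search avoids expanding into walls. Names are illustrative.
def manhattan_heuristic(grid, goal, obstacle_value=99):
    heuristic = [
        [abs(i - goal[0]) + abs(j - goal[1]) for j in range(len(grid[0]))]
        for i in range(len(grid))
    ]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == 1:
                heuristic[i][j] = obstacle_value
    return heuristic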
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
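# Quick standalone check of the 2-D rotation matrix used by the rotate step above;
# the helper name is illustrative.
def rotate_sketch(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    return numpy.dot(numpy.array(((c, -s), (s, c))), vector)

# rotate_sketch(numpy.array([1.0, 0.0]), 90) is approximately [0.0, 1.0].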
| 623 | 0 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = inspect.getfile(accelerate.test_utils )
_lowerCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
_lowerCamelCase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def snake_case__ ( self ):
_lowerCamelCase = F"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split()
_lowerCamelCase = [sys.executable] + distributed_args
execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() )
| 701 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
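# Design note: swap_nodes above exchanges the payloads of the two nodes rather
# than relinking pointers, which avoids the head/neighbor edge cases of a pointer
# swap at the cost of an O(n) scan. A standalone sketch of the same idea:
def swap_data_sketch(head, data_a, data_b):
    node_a = node_b = None
    current = head
    while current is not None:
        if current.data == data_a:
            node_a = current
        if current.data == data_b:
            node_b = current
        current = current.next
    if node_a is not None and node_b is not None:
        node_a.data, node_b.data = node_b.data, node_a.data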
| 623 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase__ : int = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_lowerCamelCase = text_generator('''This is a test''' , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__lowerCamelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_lowerCamelCase = text_generator('''This is a test''' , do_sample=__lowerCamelCase , num_return_sequences=2 , return_tensors=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{'''generated_token_ids''': ANY(__lowerCamelCase )},
{'''generated_token_ids''': ANY(__lowerCamelCase )},
] , )
_lowerCamelCase = text_generator.model.config.eos_token_id
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCamelCase , )
self.assertEqual(
__lowerCamelCase , [
[
{'''generated_token_ids''': ANY(__lowerCamelCase )},
{'''generated_token_ids''': ANY(__lowerCamelCase )},
],
[
{'''generated_token_ids''': ANY(__lowerCamelCase )},
{'''generated_token_ids''': ANY(__lowerCamelCase )},
],
] , )
@require_tf
def snake_case__ ( self ):
_lowerCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_lowerCamelCase = text_generator('''This is a test''' , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_lowerCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TextGenerationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase )
return text_generator, ["This is a test", "Another test"]
def snake_case__ ( self ):
_lowerCamelCase = '''Hello I believe in'''
_lowerCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase = text_generator(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_lowerCamelCase = text_generator(__lowerCamelCase , stop_sequence=''' fe''' )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = text_generator.model
_lowerCamelCase = text_generator.tokenizer
_lowerCamelCase = text_generator('''This is a test''' )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_lowerCamelCase = text_generator('''This is a test''' , return_full_text=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_lowerCamelCase = pipeline(task='''text-generation''' , model=__lowerCamelCase , tokenizer=__lowerCamelCase , return_full_text=__lowerCamelCase )
_lowerCamelCase = text_generator('''This is a test''' )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_lowerCamelCase = text_generator('''This is a test''' , return_full_text=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_lowerCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_lowerCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
[{'''generated_text''': ANY(__lowerCamelCase )}, {'''generated_text''': ANY(__lowerCamelCase )}],
] , )
with self.assertRaises(__lowerCamelCase ):
_lowerCamelCase = text_generator('''test''' , return_full_text=__lowerCamelCase , return_text=__lowerCamelCase )
with self.assertRaises(__lowerCamelCase ):
_lowerCamelCase = text_generator('''test''' , return_full_text=__lowerCamelCase , return_tensors=__lowerCamelCase )
with self.assertRaises(__lowerCamelCase ):
_lowerCamelCase = text_generator('''test''' , return_text=__lowerCamelCase , return_tensors=__lowerCamelCase )
# An empty prompt is slightly special:
# it requires a BOS token to exist.
# Special case for Pegasus, which always appends EOS and
# therefore works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_lowerCamelCase = text_generator('''''' )
self.assertEqual(__lowerCamelCase , [{'''generated_text''': ANY(__lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_lowerCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling long
# generation with only max_length would require fancy calculation,
# so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_lowerCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_0_0 , max_new_tokens=2_0 )
_lowerCamelCase = text_generator('''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__lowerCamelCase ):
text_generator(
'''This is a test''' * 5_0_0 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case__ ( self ):
import torch
# Classic `model_kwargs`
_lowerCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
__lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# These two are promoted to real pipeline arguments (they are simply forwarded to the model, as they are unlikely to mean anything else).
_lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
__lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_lowerCamelCase = pipe('''This is a test''' )
self.assertEqual(
__lowerCamelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def snake_case__ ( self ):
import torch
_lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def snake_case__ ( self ):
import torch
_lowerCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__lowerCamelCase , top_p=0.5 )
def snake_case__ ( self ):
_lowerCamelCase = '''Hello world'''
_lowerCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_lowerCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_lowerCamelCase = logging.get_logger('''transformers.generation.utils''' )
_lowerCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCamelCase ) as cl:
_lowerCamelCase = text_generator(__lowerCamelCase , max_length=1_0 , max_new_tokens=1 )
self.assertIn(__lowerCamelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowerCamelCase ) as cl:
_lowerCamelCase = text_generator(__lowerCamelCase , max_new_tokens=1 )
self.assertNotIn(__lowerCamelCase , cl.out )
with CaptureLogger(__lowerCamelCase ) as cl:
_lowerCamelCase = text_generator(__lowerCamelCase , max_length=1_0 )
self.assertNotIn(__lowerCamelCase , cl.out )
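# Minimal usage sketch of the core behavior the tests above exercise; the tiny
# checkpoint name comes from the tests themselves, and the exact generated text is
# model-dependent. Wrapped in a function so importing this module stays
# side-effect free.
def _text_generation_sketch():
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    # Greedy decoding (do_sample=False) makes the continuation deterministic.
    return generator("Hello I believe in", do_sample=False, max_new_tokens=10)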
| 702 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : Optional[Any] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 623 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowerCAmelCase_( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '''\'table\' has to be a square-shaped array but got a '''
            F"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(rows):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('''No LU decomposition exists''')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
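# Verification sketch for the Doolittle decomposition above (matrix values are
# illustrative): any valid factorization must multiply back to the input matrix.
def _lu_check() -> bool:
    table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lowerCAmelCase_(table)
    return bool(np.allclose(lower @ upper, table))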
| 703 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__SCREAMING_SNAKE_CASE = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase_( lowercase_ : List[str] ) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : Any ) -> Tuple:
if args.student_type == "roberta":
_lowerCamelCase = False
elif args.student_type == "gpt2":
_lowerCamelCase = False
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> Optional[int]:
if args.student_type == "roberta":
_lowerCamelCase = False
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__SCREAMING_SNAKE_CASE , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__SCREAMING_SNAKE_CASE , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__SCREAMING_SNAKE_CASE , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__SCREAMING_SNAKE_CASE , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__SCREAMING_SNAKE_CASE , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__SCREAMING_SNAKE_CASE , help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__SCREAMING_SNAKE_CASE , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__SCREAMING_SNAKE_CASE , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__SCREAMING_SNAKE_CASE , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__SCREAMING_SNAKE_CASE , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__SCREAMING_SNAKE_CASE , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__SCREAMING_SNAKE_CASE , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=__SCREAMING_SNAKE_CASE , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=__SCREAMING_SNAKE_CASE , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__SCREAMING_SNAKE_CASE , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__SCREAMING_SNAKE_CASE , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__SCREAMING_SNAKE_CASE , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__SCREAMING_SNAKE_CASE , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__SCREAMING_SNAKE_CASE , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__SCREAMING_SNAKE_CASE , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__SCREAMING_SNAKE_CASE , default=40_00 , help='''Checkpoint interval.''' )
_lowerCamelCase = parser.parse_args()
sanity_checks(__SCREAMING_SNAKE_CASE )
# ARGS #
init_gpu_params(__SCREAMING_SNAKE_CASE )
set_seed(__SCREAMING_SNAKE_CASE )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , indent=4 )
git_log(args.dump_path )
_lowerCamelCase = MODEL_CLASSES[args.student_type]
_lowerCamelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase = tokenizer.all_special_tokens.index(__SCREAMING_SNAKE_CASE )
_lowerCamelCase = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
_lowerCamelCase = special_tok_ids
_lowerCamelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
_lowerCamelCase = pickle.load(__SCREAMING_SNAKE_CASE )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
_lowerCamelCase = pickle.load(__SCREAMING_SNAKE_CASE )
_lowerCamelCase = np.maximum(__SCREAMING_SNAKE_CASE , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCamelCase = 0.0 # do not predict special tokens
_lowerCamelCase = torch.from_numpy(__SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase = None
_lowerCamelCase = LmSeqsDataset(params=__SCREAMING_SNAKE_CASE , data=__SCREAMING_SNAKE_CASE )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
_lowerCamelCase = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_lowerCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=__SCREAMING_SNAKE_CASE )
else:
_lowerCamelCase = student_model_class(__SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
_lowerCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__SCREAMING_SNAKE_CASE )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase = Distiller(
params=__SCREAMING_SNAKE_CASE , dataset=__SCREAMING_SNAKE_CASE , token_probs=__SCREAMING_SNAKE_CASE , student=__SCREAMING_SNAKE_CASE , teacher=__SCREAMING_SNAKE_CASE )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
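# A hedged usage sketch (not part of the script): a typical invocation of this distillation
# trainer. The script name, checkpoint names, paths, and loss weights below are illustrative
# assumptions; the flag names are the ones defined by the parser above.
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#       --dump_path serialization_dir/my_distillation --force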
| 704 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
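# A hedged usage sketch (not part of the tests above): TrOCRForCausalLM is a standalone
# decoder, so it can be driven directly with token ids. The tiny config values mirror the
# tester above; whether `generate` runs end-to-end on a randomly initialized model this
# small is an assumption.
#
# import torch
# from transformers import TrOCRConfig
# from transformers.models.trocr.modeling_trocr import TrOCRForCausalLM
#
# config = TrOCRConfig(vocab_size=99, d_model=16, decoder_layers=2,
#                      decoder_attention_heads=4, decoder_ffn_dim=4)
# model = TrOCRForCausalLM(config).eval()
# input_ids = torch.randint(3, 99, (1, 5))
# generated = model.generate(input_ids, max_length=10)  # caching via past_key_values happens internally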
| 623 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@property
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def snake_case__ ( self ):
_lowerCamelCase = self.dummy_uncond_unet
_lowerCamelCase = PNDMScheduler()
_lowerCamelCase = PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pndm(generator=A_ , num_inference_steps=2_0 , output_type='''numpy''' ).images
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pndm(generator=A_ , num_inference_steps=2_0 , output_type='''numpy''' , return_dict=A_ )[0]
_lowerCamelCase = image[0, -3:, -3:, -1]
_lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = '''google/ddpm-cifar10-32'''
_lowerCamelCase = UNetaDModel.from_pretrained(A_ )
_lowerCamelCase = PNDMScheduler()
_lowerCamelCase = PNDMPipeline(unet=A_ , scheduler=A_ )
pndm.to(A_ )
pndm.set_progress_bar_config(disable=A_ )
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pndm(generator=A_ , output_type='''numpy''' ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
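# A hedged usage sketch mirroring the slow test above (the checkpoint name is taken from
# the test; the mangled class names are kept exactly as imported in this snippet):
#
# import torch
# unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to("cpu")
# image = pipe(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images[0]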
| 705 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
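# A hedged migration sketch (not from this file): since the deprecated class only forwards
# to `Trainer`, existing call sites can switch directly. `model`, `training_args`, and
# `train_dataset` are placeholder names.
#
# from transformers import Trainer
#
# trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
# trainer.train()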
| 623 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : int=1 ) -> int:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : Optional[int]=0 ) -> Union[str, Any]:
_lowerCamelCase = []
for old_item in old_list:
_lowerCamelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
_lowerCamelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
_lowerCamelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
_lowerCamelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
_lowerCamelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
_lowerCamelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
_lowerCamelCase = shave_segments(_lowerCamelCase , n_shave_prefix_segments=_lowerCamelCase )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCAmelCase_( lowercase_ : str , lowercase_ : int=0 ) -> Dict:
_lowerCamelCase = []
for old_item in old_list:
_lowerCamelCase = old_item
_lowerCamelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
_lowerCamelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
_lowerCamelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
_lowerCamelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
_lowerCamelCase = shave_segments(_lowerCamelCase , n_shave_prefix_segments=_lowerCamelCase )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCAmelCase_( lowercase_ : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Dict=None , lowercase_ : int=None , lowercase_ : str=None ) -> List[Any]:
assert isinstance(_lowerCamelCase , _lowerCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCamelCase = old_checkpoint[path]
_lowerCamelCase = old_tensor.shape[0] // 3
_lowerCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCamelCase = old_tensor.shape[0] // config["num_head_channels"] // 3
_lowerCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCamelCase = old_tensor.split(channels // num_heads , dim=1 )
_lowerCamelCase = query.reshape(_lowerCamelCase )
_lowerCamelCase = key.reshape(_lowerCamelCase )
_lowerCamelCase = value.reshape(_lowerCamelCase )
for path in paths:
_lowerCamelCase = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCamelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
_lowerCamelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
_lowerCamelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCamelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCamelCase = old_checkpoint[path["old"]][:, :, 0]
else:
_lowerCamelCase = old_checkpoint[path["old"]]
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[Any] ) -> str:
_lowerCamelCase = {}
_lowerCamelCase = checkpoint["time_embed.0.weight"]
_lowerCamelCase = checkpoint["time_embed.0.bias"]
_lowerCamelCase = checkpoint["time_embed.2.weight"]
_lowerCamelCase = checkpoint["time_embed.2.bias"]
_lowerCamelCase = checkpoint["input_blocks.0.0.weight"]
_lowerCamelCase = checkpoint["input_blocks.0.0.bias"]
_lowerCamelCase = checkpoint["out.0.weight"]
_lowerCamelCase = checkpoint["out.0.bias"]
_lowerCamelCase = checkpoint["out.2.weight"]
_lowerCamelCase = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
_lowerCamelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_lowerCamelCase = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCamelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_lowerCamelCase = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(_lowerCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCamelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_lowerCamelCase = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(_lowerCamelCase )
}
for i in range(1 , _lowerCamelCase ):
_lowerCamelCase = (i - 1) // (config["num_res_blocks"] + 1)
_lowerCamelCase = (i - 1) % (config["num_res_blocks"] + 1)
_lowerCamelCase = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
_lowerCamelCase = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
_lowerCamelCase = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
_lowerCamelCase = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase = {"old": F"""input_blocks.{i}.0""", "new": F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_lowerCamelCase = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path, resnet_op] , config=_lowerCamelCase )
if len(_lowerCamelCase ):
_lowerCamelCase = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase = {
"old": F"""input_blocks.{i}.1""",
"new": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_lowerCamelCase = {
F"""input_blocks.{i}.1.qkv.bias""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=_lowerCamelCase , config=_lowerCamelCase , )
_lowerCamelCase = middle_blocks[0]
_lowerCamelCase = middle_blocks[1]
_lowerCamelCase = middle_blocks[2]
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase )
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase )
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , config=_lowerCamelCase )
_lowerCamelCase = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , attention_paths_to_split=_lowerCamelCase , config=_lowerCamelCase )
for i in range(_lowerCamelCase ):
_lowerCamelCase = i // (config["num_res_blocks"] + 1)
_lowerCamelCase = i % (config["num_res_blocks"] + 1)
_lowerCamelCase = [shave_segments(_lowerCamelCase , 2 ) for name in output_blocks[i]]
_lowerCamelCase = {}
for layer in output_block_layers:
_lowerCamelCase = layer.split('''.''' )[0], shave_segments(_lowerCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_lowerCamelCase )
else:
_lowerCamelCase = [layer_name]
if len(_lowerCamelCase ) > 1:
_lowerCamelCase = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
_lowerCamelCase = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase )
_lowerCamelCase = {"old": F"""output_blocks.{i}.0""", "new": F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , config=_lowerCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCamelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_lowerCamelCase = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
_lowerCamelCase = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(_lowerCamelCase ) == 2:
_lowerCamelCase = []
if len(_lowerCamelCase ):
_lowerCamelCase = renew_attention_paths(_lowerCamelCase )
_lowerCamelCase = {
"old": F"""output_blocks.{i}.1""",
"new": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_lowerCamelCase = {
F"""output_blocks.{i}.1.qkv.bias""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_lowerCamelCase , )
else:
_lowerCamelCase = renew_resnet_paths(_lowerCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCamelCase = ".".join(['''output_blocks''', str(_lowerCamelCase ), path['''old''']] )
_lowerCamelCase = ".".join(['''up_blocks''', str(_lowerCamelCase ), '''resnets''', str(_lowerCamelCase ), path['''new''']] )
_lowerCamelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
__SCREAMING_SNAKE_CASE : Tuple = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(f.read())
__SCREAMING_SNAKE_CASE : Any = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__SCREAMING_SNAKE_CASE : List[str] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__SCREAMING_SNAKE_CASE : Any = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
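# A hedged usage sketch (script name and paths are illustrative assumptions):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted
#
# Per the try/except above: if a scheduler and VQModel config live next to the checkpoint,
# a full LDMPipeline is saved; otherwise only the converted UNet is saved.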
| 706 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
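# A hedged sketch of the jit pattern exercised by `model_jitted` above (the checkpoint name
# is taken from the slow test; the full-attention switch and call shapes are assumptions):
#
# import jax
#
# model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
#
# @jax.jit
# def forward(input_ids, attention_mask):
#     return model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
#
# # The first call with a given shape compiles; later calls reuse the compiled program.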
| 623 | 0 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__SCREAMING_SNAKE_CASE : str = logging.getLogger()
def lowerCAmelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_lowerCamelCase = parser.parse_args()
return args.f
class lowerCamelCase_( A__ ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCamelCase )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
_lowerCamelCase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__UpperCamelCase , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def snake_case__ ( self ):
_lowerCamelCase = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(__UpperCamelCase )
_lowerCamelCase = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__UpperCamelCase )
_lowerCamelCase = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__UpperCamelCase )
| 707 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 0 |
"""simple docstring"""
class lowerCamelCase_: # Public class to implement a graph
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = row
_lowerCamelCase = col
_lowerCamelCase = graph
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# Check all 8 cells surrounding cell (i, j)
_lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
_lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ )
def snake_case__ ( self ): # And finally, count all islands.
_lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
_lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
count += 1
return count
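# A hedged usage sketch. The obfuscated naming above makes the three `snake_case__` defs
# shadow one another, so this assumes the methods keep the names the internal calls expect
# (`is_safe`, `diffs`) plus a distinct counting method; the grid and the expected answer are
# worked out by hand for 8-directional connectivity:
#
# grid = [
#     [1, 1, 0, 0, 0],
#     [0, 1, 0, 0, 1],
#     [1, 0, 0, 1, 1],
#     [0, 0, 0, 0, 0],
#     [1, 0, 1, 0, 1],
# ]
# g = lowerCamelCase_(5, 5, grid)
# print(g.count_islands())  # expected: 5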
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
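# A hedged sketch of what the lazy module above buys you (standard `_LazyModule` behaviour,
# stated as an assumption): importing the package is cheap, and each submodule is only
# imported on first attribute access.
#
# from transformers.models.xlm import XLMTokenizer   # triggers the tokenization_xlm import
# from transformers.models.xlm import XLMModel       # triggers modeling_xlm (torch required)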
| 623 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class lowerCamelCase_( __A ):
'''simple docstring'''
lowercase__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 2_5_5 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
_lowerCamelCase = size if size is not None else {'shortest_edge': 2_5_6}
_lowerCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
_lowerCamelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
_lowerCamelCase = get_size_dict(UpperCamelCase__ )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_center_crop
_lowerCamelCase = crop_size
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowerCamelCase = get_resize_output_image_size(UpperCamelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = get_size_dict(UpperCamelCase__ )
return center_crop(UpperCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase = get_size_dict(UpperCamelCase__ )
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase = image_std if image_std is not None else self.image_std
_lowerCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
_lowerCamelCase = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
_lowerCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
_lowerCamelCase = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
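# A hedged usage sketch of the processor above. The public entry point is the last
# `snake_case__` defined (the preprocess method); keyword names are shown as they would
# read in the unmangled class, and the dummy image is an assumption.
#
# import numpy as np
# processor = lowerCamelCase_()            # defaults: shortest_edge=256 resize, 224x224 crop
# image = np.zeros((3, 300, 400), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="np")
# print(batch["pixel_values"].shape)       # expected: (1, 3, 224, 224)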
| 709 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 0 |
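The normalization test above pins down `zero_mean_unit_var_norm`: after rescaling the audio to [0, 65535], the helper must bring it back to mean ~0 and variance ~1. A minimal self-contained sketch of that contract (the 1e-7 epsilon guarding all-zero inputs is an assumption about the shared helper, not read from the snippet):

import numpy as np

def zero_mean_unit_var_norm_sketch(values: np.ndarray) -> np.ndarray:
    # per-example standardization; the epsilon is assumed, it avoids dividing by zero on silence
    values = np.asarray(values, dtype=np.float32)
    return (values - values.mean()) / np.sqrt(values.var() + 1e-7)

audio = np.random.rand(16_000).astype(np.float32) * 65_535  # rescaled input, as in the test
normed = zero_mean_unit_var_norm_sketch(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3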
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1 , lowerCamelCase__=0.0 , lowerCamelCase__=1_6_0_0_0 , lowerCamelCase__=True , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = feature_size
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
_lowerCamelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( SequenceFeatureExtractionTestMixin, unittest.TestCase ):
'''simple docstring'''
feature_extraction_class = ASTFeatureExtractor
def setUp( self ):
self.feat_extract_tester = ASTFeatureExtractionTester(self )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_lowerCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
_lowerCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase_ )
_lowerCamelCase = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
_lowerCamelCase = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
@require_torch
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 ).astype(np.float64 )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
_lowerCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
from datasets import load_dataset
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = ASTFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCamelCase_ , atol=1e-4 ) )
| 710 |
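For orientation, the (1, 1024, 128) shape asserted above reflects AST's recipe: a 128-bin Kaldi-style log-mel filterbank, padded or truncated to 1024 frames, then normalized with dataset-level statistics. A hedged sketch of that last normalization step; the AudioSet mean/std constants below are assumed defaults, not values read from this file:

import numpy as np

AUDIOSET_MEAN = -4.2677393   # assumed default, from the AST recipe
AUDIOSET_STD = 4.5689974     # assumed default, from the AST recipe

def normalize_fbank(fbank: np.ndarray) -> np.ndarray:
    # AST divides by twice the std so typical values land roughly in [-1, 1]
    return (fbank - AUDIOSET_MEAN) / (2 * AUDIOSET_STD)

print(normalize_fbank(np.zeros((1_024, 128))).mean())  # ~0.467 for an all-zero filterbank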
"""simple docstring"""
def abbreviation( a : str , b : str ) -> bool:
    """
    Return True if `b` can be formed from `a` by capitalizing some lowercase letters
    of `a` and deleting the remaining lowercase letters.
    >>> abbreviation("daBcd", "ABC")
    True
    >>> abbreviation("dBcd", "ABC")
    False
    """
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 623 | 0 |
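The table above only ever reads row i while writing row i + 1, so the same check runs in O(m) space by tracking just the set of reachable positions in `b` (an equivalent reformulation, not part of the original snippet):

def abbreviation_set(a: str, b: str) -> bool:
    reachable = {0}  # indices of b matched so far
    for ch in a:
        nxt = set()
        for j in reachable:
            if j < len(b) and ch.upper() == b[j]:
                nxt.add(j + 1)  # capitalize ch and consume b[j]
            if ch.islower():
                nxt.add(j)      # delete the lowercase ch
        reachable = nxt
    return len(b) in reachable

assert abbreviation_set("daBcd", "ABC") and not abbreviation_set("dBcd", "ABC")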
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def _get_uniform_logits( self , batch_size , length ):
_lowerCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 2_0
_lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
_lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCamelCase = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCamelCase = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
_lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create ramp distribution
_lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
_lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCamelCase = 5
_lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
_lowerCamelCase = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self ):
_lowerCamelCase = None
_lowerCamelCase = 1_0
_lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
_lowerCamelCase = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCamelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
_lowerCamelCase = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
_lowerCamelCase = 5
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = 1_5
_lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
_lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=2_0 )
_lowerCamelCase = 1
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 2_0
_lowerCamelCase = 4
_lowerCamelCase = 0
_lowerCamelCase = 5
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=2_0 )
_lowerCamelCase = 4
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCamelCase = 3
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase__ )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
_lowerCamelCase = 1_0
# no processor list
_lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self ):
_lowerCamelCase = 4
_lowerCamelCase = 1_0
_lowerCamelCase = 1_5
_lowerCamelCase = 2
_lowerCamelCase = 1
_lowerCamelCase = 1_5
# dummy input_ids and scores
_lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
_lowerCamelCase = input_ids.copy()
_lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = scores.copy()
# instantiate all dist processors
_lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase = FlaxTopKLogitsWarper(3 )
_lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=UpperCamelCase__ )
_lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
_lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
_lowerCamelCase = 1_0
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
_lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
_lowerCamelCase = jax.jit(UpperCamelCase__ )
_lowerCamelCase = jax.jit(UpperCamelCase__ )
_lowerCamelCase = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_lowerCamelCase = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 711 |
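All the warpers exercised above share one contract: a (batch, vocab) score matrix goes in, the same shape comes out, and filtered entries are set to -inf (or a custom filter_value). A numpy sketch of the top-k rule the first warper test checks, keeping the k largest logits per row and masking the rest:

import numpy as np

def top_k_filter(scores: np.ndarray, k: int, filter_value: float = -np.inf) -> np.ndarray:
    # threshold each row at its k-th largest logit
    kth_largest = np.sort(scores, axis=-1)[:, -k][:, None]
    return np.where(scores < kth_largest, filter_value, scores)

ramp = np.arange(10, dtype=np.float32)[None, :]
filtered = top_k_filter(ramp, k=3)
assert np.isinf(filtered[0, :7]).all() and (filtered[0, 7:] == [7.0, 8.0, 9.0]).all()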
"""simple docstring"""
import numpy as np
def sigmoid( vector : np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector : np.array ) -> np.array:
    return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 623 | 0 |
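The 1.702 constant above comes from fitting the logistic sigmoid to the Gaussian CDF, which makes x * sigmoid(1.702 * x) a close approximation of the exact GELU x * Phi(x). A quick numerical check (scipy is assumed available for the Gaussian CDF):

import numpy as np
from scipy.stats import norm

x = np.linspace(-4, 4, 801)
exact_gelu = x * norm.cdf(x)
approx_gelu = x * (1 / (1 + np.exp(-1.702 * x)))
print(np.max(np.abs(exact_gelu - approx_gelu)))  # roughly 0.02 at its worst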
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0_0_0_3
PYTHON_CODE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
tokenizer_class = PLBartTokenizer
rust_tokenizer_class = None
test_rust_tokenizer = False
def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase = PLBartTokenizer(lowerCAmelCase_ , language_codes='''base''' , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
_lowerCamelCase = PLBartTokenizer(lowerCAmelCase_ , language_codes='''base''' , keep_accents=lowerCAmelCase_ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_lowerCamelCase = tokenizer.vocab_size
_lowerCamelCase = [tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 4 , lowerCAmelCase_ )]
self.assertListEqual(lowerCAmelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
_lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_lowerCamelCase = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
def snake_case__ ( self ):
_lowerCamelCase = PLBartTokenizer(lowerCAmelCase_ , language_codes='''multi''' , keep_accents=lowerCAmelCase_ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_lowerCamelCase = tokenizer.vocab_size
_lowerCamelCase = [tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) for x in range(end - 7 , lowerCAmelCase_ )]
self.assertListEqual(
lowerCAmelCase_ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
_lowerCamelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_lowerCamelCase = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) , lowerCAmelCase_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
checkpoint_name = 'uclanlp/plbart-python-en_XX'
src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
expected_src_tokens = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def setUpClass( cls ):
cls.tokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
cls.pad_token_id = 1
return cls
def snake_case__ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0_0_0_3 )
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def snake_case__ ( self ):
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
_lowerCamelCase = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
_lowerCamelCase = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def snake_case__ ( self ):
_lowerCamelCase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 2_0]
self.assertIsInstance(src_text[0] , lowerCAmelCase_ )
_lowerCamelCase = 1_0
_lowerCamelCase = self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def snake_case__ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0_0_0_4, 5_0_0_0_1] )
def snake_case__ ( self ):
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
_lowerCamelCase = PLBartTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors='''pt''' )
_lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
_lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors='''pt''' )
_lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0 , return_tensors='''pt''' )
_lowerCamelCase = targets['''input_ids''']
_lowerCamelCase = shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0_0_0_1,
} , )
| 712 |
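The batch tests above pin down PLBart's sequence format: source sequences carry no prefix tokens and end with [eos, src_lang_code], which is why input_ids[1][-2:] equals [2, PYTHON_CODE]. A toy sketch of that wrapping; the ids reuse the constants from the tests, everything else is illustrative:

def wrap_plbart_source(token_ids: list, eos_id: int = 2, lang_code_id: int = 50_002) -> list:
    # prefix_tokens == [], suffix_tokens == [eos, language code]
    return token_ids + [eos_id, lang_code_id]

assert wrap_plbart_source([134, 5_452])[-2:] == [2, 50_002]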
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
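The try/except scaffolding above is Transformers' lazy-import layout: heavy framework-specific submodules are only imported when one of their attributes is first touched. A framework-independent sketch of the same idea using module-level __getattr__ (PEP 562), not the _LazyModule helper itself:

import importlib

_LAZY_ATTRS = {"VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder"}

def __getattr__(name):
    # resolve the attribute on first access, then delegate to the real submodule
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")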
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache( Generic[T] ):
    '''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
    def __repr__( self ):
        return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    lru_cache = LRUCache(4)
    lru_cache.refer('''A''')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('''A''')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 713 |
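One design note on the class above: refer() costs O(n) on a cache hit because deque.remove scans the queue. The same policy runs in O(1) per operation with an ordered dict (a common alternative formulation, not part of the snippet above):

from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)      # mark as most recently used
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)   # evict the least recently used key
        self.store[key] = None

cache = LRUCacheOD(4)
for key in ["A", 2, 3, "A", 4, 5]:
    cache.refer(key)
assert list(cache.store) == [3, "A", 4, 5]   # oldest to newest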
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 623 | 0 |
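Worked numbers for the impedance triangle the function solves (Z**2 == R**2 + X**2): with R = 3 ohm and X = 4 ohm the missing impedance is 5 ohm, and the relation inverts the same way for a missing resistance or reactance. Assuming electrical_impedance from above is in scope:

print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}
print(electrical_impedance(3, 0, 5))  # {'reactance': 4.0}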
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCAmelCase_( lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=None ) -> Dict:
return field(default_factory=lambda: default , metadata=UpperCAmelCase__ )
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : Optional[int] = field(
metadata={'help': 'The csv file to plot.'}, )
lowercase__ : str = field(
default=_a, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'}, )
lowercase__ : Dict = field(
default=_a, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'}, )
lowercase__ : Any = field(
default=_a, metadata={'help': 'Disable logarithmic scale when plotting'}, )
lowercase__ : Optional[int] = field(
default=_a, metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
}, )
lowercase__ : Any = field(
default=_a, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'}, )
lowercase__ : Union[str, Any] = list_field(
default=_a, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def lowerCAmelCase_( lowercase_ : Dict ) -> Union[str, Any]:
try:
int(UpperCAmelCase__ )
return True
except ValueError:
return False
def lowerCAmelCase_( lowercase_ : Tuple ) -> int:
try:
float(UpperCAmelCase__ )
return True
except ValueError:
return False
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = args
_lowerCamelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
_lowerCamelCase = csv.DictReader(_A )
for row in reader:
_lowerCamelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
_lowerCamelCase = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
_lowerCamelCase = float(row['''result'''] )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = plt.subplots()
_lowerCamelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
_lowerCamelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_lowerCamelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
_lowerCamelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
_lowerCamelCase = self.result_dict[model_name]['''result''']
((_lowerCamelCase) , (_lowerCamelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_lowerCamelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_lowerCamelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_A , )
else:
_lowerCamelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
((_lowerCamelCase) , (_lowerCamelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
_lowerCamelCase = np.asarray(_A , _A )[: len(_A )]
plt.scatter(
_A , _A , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(_A , _A , '''--''' )
title_str += F""" {label_model_name} vs."""
_lowerCamelCase = title_str[:-4]
_lowerCamelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_A )
plt.xlabel(_A )
plt.ylabel(_A )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCAmelCase_( ) -> Union[str, Any]:
_lowerCamelCase = HfArgumentParser(UpperCAmelCase__ )
_lowerCamelCase = parser.parse_args_into_dataclasses()[0]
_lowerCamelCase = Plot(args=UpperCAmelCase__ )
plot.plot()
if __name__ == "__main__":
main()
| 714 |
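For reference, the reader above expects one CSV row per (model, batch_size, sequence_length) measurement with a numeric result column. A tiny hedged fixture to drive the plotter; the file name and values are illustrative:

import csv

with open("bench.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerow({"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1.3})
    writer.writerow({"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 4.9})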
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences( sequence : list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree( sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(['''A''', '''B''', '''C'''])
    generate_all_subsequences(seq)
| 623 | 0 |
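The recursion above enumerates all 2**n subsequences by branching on skip-vs-take at each index. An equivalent iterative formulation with bitmasks, handy when recursion depth is a concern (not part of the original snippet):

from typing import Any, Iterator

def subsequences_bitmask(sequence: list) -> Iterator[list]:
    n = len(sequence)
    for mask in range(1 << n):
        # bit i of the mask decides whether element i is taken
        yield [sequence[i] for i in range(n) if mask & (1 << i)]

assert sum(1 for _ in subsequences_bitmask([3, 1, 2, 4])) == 16  # 2**4 subsequences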
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 1_2_8}
class lowerCamelCase_( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = BlenderbotTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __UpperCamelCase ) != add_prefix_space:
_lowerCamelCase = getattr(__UpperCamelCase , pre_tok_state.pop('''type''' ) )
_lowerCamelCase = add_prefix_space
_lowerCamelCase = pre_tok_class(**__UpperCamelCase )
_lowerCamelCase = add_prefix_space
_lowerCamelCase = '''post_processor'''
_lowerCamelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase = tuple(state['''cls'''] )
_lowerCamelCase = False
if state.get('''add_prefix_space''' , __UpperCamelCase ) != add_prefix_space:
_lowerCamelCase = add_prefix_space
_lowerCamelCase = True
if state.get('''trim_offsets''' , __UpperCamelCase ) != trim_offsets:
_lowerCamelCase = trim_offsets
_lowerCamelCase = True
if changes_to_apply:
_lowerCamelCase = getattr(__UpperCamelCase , state.pop('''type''' ) )
_lowerCamelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
self._mask_token = value
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = kwargs.get('''is_split_into_words''' , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = kwargs.get('''is_split_into_words''' , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_lowerCamelCase = ''' '''.join(__UpperCamelCase )
_lowerCamelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_lowerCamelCase = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 715 |
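A small sketch of the truncation rule at the end of the conversation-encoding method above: when the joined conversation exceeds model_max_length, the oldest tokens are dropped and the most recent ones kept (toy ids, not real vocabulary):

def truncate_left(input_ids: list, model_max_length: int) -> list:
    # keep the newest tokens, exactly as the method above does
    if len(input_ids) > model_max_length:
        return input_ids[-model_max_length:]
    return input_ids

assert truncate_left(list(range(10)), 4) == [6, 7, 8, 9]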
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    '''simple docstring'''
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', FutureWarning, )
| 623 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowerCamelCase_( PretrainedConfig ):
'''simple docstring'''
model_type = """perceiver"""
def __init__( self , lowerCamelCase__=2_5_6 , lowerCamelCase__=1_2_8_0 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1 , lowerCamelCase__=2_6 , lowerCamelCase__=8 , lowerCamelCase__=8 , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="kv" , lowerCamelCase__=1 , lowerCamelCase__=1 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-12 , lowerCamelCase__=True , lowerCamelCase__=2_6_2 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=5_6 , lowerCamelCase__=[3_6_8, 4_9_6] , lowerCamelCase__=1_6 , lowerCamelCase__=1_9_2_0 , lowerCamelCase__=1_6 , lowerCamelCase__=[1, 1_6, 2_2_4, 2_2_4] , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = num_latents
_lowerCamelCase = d_latents
_lowerCamelCase = d_model
_lowerCamelCase = num_blocks
_lowerCamelCase = num_self_attends_per_block
_lowerCamelCase = num_self_attention_heads
_lowerCamelCase = num_cross_attention_heads
_lowerCamelCase = qk_channels
_lowerCamelCase = v_channels
_lowerCamelCase = cross_attention_shape_for_attention
_lowerCamelCase = self_attention_widening_factor
_lowerCamelCase = cross_attention_widening_factor
_lowerCamelCase = hidden_act
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = use_query_residual
# masked language modeling attributes
_lowerCamelCase = vocab_size
_lowerCamelCase = max_position_embeddings
# image classification attributes
_lowerCamelCase = image_size
# flow attributes
_lowerCamelCase = train_size
# multimodal autoencoding attributes
_lowerCamelCase = num_frames
_lowerCamelCase = audio_samples_per_frame
_lowerCamelCase = samples_per_patch
_lowerCamelCase = output_shape
class lowerCamelCase_( OnnxConfig ):
'''simple docstring'''
@property
def snake_case__ ( self ):
if self.task == "multiple-choice":
_lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def snake_case__ ( self ):
return 1e-4
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 3 , lowerCamelCase__ = 4_0 , lowerCamelCase__ = 4_0 , ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase = compute_effective_axis_dimension(
lowerCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase = preprocessor.num_special_tokens_to_add(lowerCamelCase__ )
_lowerCamelCase = compute_effective_axis_dimension(
lowerCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase = [''' '''.join(['''a'''] ) * seq_length] * batch_size
_lowerCamelCase = dict(preprocessor(lowerCamelCase__ , return_tensors=lowerCamelCase__ ) )
_lowerCamelCase = inputs.pop('''input_ids''' )
return inputs
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase = compute_effective_axis_dimension(lowerCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch )
_lowerCamelCase = self._generate_dummy_images(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = dict(preprocessor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ ) )
_lowerCamelCase = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 716 |
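The config defaults above (256 latents, inputs up to 2048 positions) encode Perceiver's core trick: arbitrary-length inputs attend into a small fixed latent array, so self-attention cost scales with the latent count rather than the input length. A single-head, projection-free numpy sketch of that cross-attention shape contract:

import numpy as np

def cross_attend(latents: np.ndarray, inputs: np.ndarray) -> np.ndarray:
    # latents: (num_latents, d), inputs: (seq_len, d)
    scores = latents @ inputs.T / np.sqrt(latents.shape[-1])  # (num_latents, seq_len)
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ inputs  # (num_latents, d): the input length is gone

out = cross_attend(np.random.randn(256, 64), np.random.randn(2_048, 64))
assert out.shape == (256, 64)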
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
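# A minimal sketch of the entity_vocab.tsv layout the loader above expects
# (entries are hypothetical): one "<entity>\t<count>" pair per line, with each
# entity mapped to its zero-based line index, e.g.
#   [PAD]\t0   -> entity_vocab['[PAD]'] == 0
#   [MASK]\t0  -> entity_vocab['[MASK]'] == 1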
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
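# Example invocation (script name and paths are illustrative placeholders):
# python convert_luke_checkpoint.py \
#     --checkpoint_path ./pytorch_model.bin \
#     --metadata_path ./metadata.json \
#     --entity_vocab_path ./entity_vocab.tsv \
#     --pytorch_dump_folder_path ./luke-base \
#     --model_size base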
| 623 | 0 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 717 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
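# A quick numeric check of the rule above using this tester's defaults
# (image_size=30, patch_size=2, mask_ratio=0.6):
#   num_patches = (30 // 2) ** 2 = 225
#   expected seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91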
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure the returned config is JSON-serializable, which is required by Keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=1_0 , lowerCamelCase__=3 , lowerCamelCase__=3_2 * 4 , lowerCamelCase__=3_2 * 6 , lowerCamelCase__=4 , lowerCamelCase__=3_2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = is_training
_lowerCamelCase = use_auxiliary_loss
_lowerCamelCase = num_queries
_lowerCamelCase = num_channels
_lowerCamelCase = min_size
_lowerCamelCase = max_size
_lowerCamelCase = num_labels
_lowerCamelCase = mask_feature_size
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = output.encoder_hidden_states
_lowerCamelCase = output.pixel_decoder_hidden_states
_lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_config.decoder_layers )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
with torch.no_grad():
_lowerCamelCase = MaskFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = MaskFormerForInstanceSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
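# Note on the "+ 1" in the class-logits check above: the class head predicts
# num_labels real classes plus one extra "no object" (null) class per query, so
# with this tester's default num_labels=10 each query emits 11 class logits.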
@require_torch
class lowerCamelCase_( __lowerCAmelCase, __lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowercase__ : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = MaskFormerModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def snake_case__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case__ ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCamelCase = MaskFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = (self.model_tester.min_size,) * 2
_lowerCamelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=lowerCamelCase__ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=lowerCamelCase__ ).long(),
}
_lowerCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def snake_case__ ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCamelCase = self.all_model_classes[1]
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def snake_case__ ( self ):
_lowerCamelCase = self.all_model_classes[1]
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__SCREAMING_SNAKE_CASE : Optional[int] = 1e-4
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def snake_case__ ( self ):
_lowerCamelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
_lowerCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
_lowerCamelCase = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_lowerCamelCase = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_lowerCamelCase = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase__ )
.eval()
)
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
_lowerCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
_lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowerCamelCase__ )
.eval()
)
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
_lowerCamelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
_lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCamelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase__ )
.eval()
)
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_lowerCamelCase = inputs['''pixel_values'''].to(lowerCamelCase__ )
_lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['''mask_labels''']]
_lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 718 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
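# A quick numeric check of the discount formula above (prices are illustrative):
# with MRP = ₹1,000 and current price = ₹750,
#   (1000 - 750) / 1000 * 100 = 25.0 percent discount.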
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
__SCREAMING_SNAKE_CASE : int = [
'good first issue',
'feature request',
'wip',
]
def lowerCAmelCase_( ) -> Optional[int]:
_lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] )
_lowerCamelCase = g.get_repo('''huggingface/accelerate''' )
_lowerCamelCase = repo.get_issues(state='''open''' )
for issue in open_issues:
_lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
_lowerCamelCase = comments[0] if len(comments ) > 0 else None
_lowerCamelCase = dt.utcnow()
_lowerCamelCase = (current_time - issue.updated_at).days
_lowerCamelCase = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close the issue: more than 7 days of inactivity since the bot's stale mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
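# Summary of the policy implemented above: issues at least 30 days old with no
# exempt label are closed once the bot's own stale comment has gone unanswered
# for more than 7 days, and are first marked stale after more than 23 days
# without any update.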
if __name__ == "__main__":
main()
| 719 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
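# Rough sizing of the character buffer above with the default arguments
# (seq_length=1024, chars_per_token=3.6, num_of_sequences=1024):
#   input_characters = 1024 * 3.6 * 1024 ≈ 3.77M characters per fill,
# i.e. roughly enough raw text for 1024 sequences of 1024 tokens.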
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
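# Perplexity sketch for the evaluate() helper above: perplexity = exp(mean loss),
# so a mean cross-entropy loss of 2.0 maps to exp(2.0) ≈ 7.39; the OverflowError
# branch guards against exp() overflowing for very large losses.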
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowerCamelCase_( lowercase_ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
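# A minimal migration sketch (checkpoint name is illustrative): the replacement
# class exposes the same preprocessing API, so
#   processor = DeformableDetrImageProcessor.from_pretrained('SenseTime/deformable-detr')
# is a drop-in substitute for instantiating the deprecated feature extractor.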
| 720 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. We declare convergence when we exceed max_iterations
# or when the change from one iteration to the next is small.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find the Rayleigh quotient
# (faster than usual because we know the vector is already normalized)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
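# A worked single step of the loop above on a tiny symmetric matrix
# (hypothetical 2x2 example, independent of the test below):
#   A = [[2, 0], [0, 1]], v0 = [1, 1] / sqrt(2)
#   w = A @ v0 = [2, 1] / sqrt(2) ; v1 = w / ||w|| = [2, 1] / sqrt(5)
#   Rayleigh quotient v1.T @ A @ v1 = (2*4 + 1*1) / 5 = 1.8, converging to 2.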
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh is used for symmetric or Hermitian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__SCREAMING_SNAKE_CASE : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__SCREAMING_SNAKE_CASE : List[str] = ''' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'''
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_lowerCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def snake_case__ ( self ):
_lowerCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_lowerCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
_lowerCamelCase = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_lowerCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(lowerCamelCase__ , '''w''' , newline='''\n''' ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , '''r''' ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_lowerCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCamelCase__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCamelCase__ ) , )
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
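# A minimal sketch (import path shown for illustration) of what the _LazyModule
# indirection above provides: importing the package stays cheap, and e.g.
#   from transformers.models.speecht5 import SpeechT5Processor
# loads each symbol's backing module only on first attribute access, so
# torch-backed classes are never imported unless actually requested.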
| 623 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionLDMaDPipeline
lowercase__ : str = TEXT_TO_IMAGE_PARAMS
lowercase__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_lowerCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1]
_lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_lowerCamelCase = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
_lowerCamelCase = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * [inputs['''prompt''']]
# forward
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCamelCase = depth_slice_a[0, -3:, -1]
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase = ldmad_pipe.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
_lowerCamelCase = text_inputs['''input_ids'''].to(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.text_encoder(lowerCamelCase__ )[0]
_lowerCamelCase = prompt_embeds
# forward
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCamelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = '''french fries'''
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1]
_lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_lowerCamelCase = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
_lowerCamelCase = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1].flatten()
_lowerCamelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
_lowerCamelCase = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
_lowerCamelCase = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = 0.4_9_5_5_8_6
_lowerCamelCase = 0.3_3_7_9_5_5_1_5
_lowerCamelCase = 1_1_2.4_8_5_1_8
_lowerCamelCase = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = 0.4_1_9_4_1_2_7
_lowerCamelCase = 0.3_5_3_7_5_5_8_6
_lowerCamelCase = 0.5_6_3_8_5_0_2
_lowerCamelCase = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
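# The reproducibility idiom the tests above rely on, shown in isolation: two
# torch.Generator objects seeded identically produce identical draws, which
# is what keeps the expected output slices stable across runs.
import torch

g_a = torch.Generator(device="cpu").manual_seed(0)
g_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=g_a), torch.randn(4, generator=g_b))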
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
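# A quick sanity check on the growth of the vector list (assuming `iterate`
# as used in the block above is in scope): each iteration replaces every
# segment with four shorter ones, so the initial closed triangle (4 stored
# points = 3 segments) grows to 3 * 4**n + 1 stored points after n steps.
initial = [
    numpy.array([0, 0]),
    numpy.array([0.5, 0.8660254]),
    numpy.array([1, 0]),
    numpy.array([0, 0]),
]
for n in range(4):
    assert len(iterate(initial, n)) == 3 * 4**n + 1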
| 623 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = '''\
Text data.
Second line of data.'''
__SCREAMING_SNAKE_CASE : str = '''file'''
@pytest.fixture(scope='''session''' )
def lowerCAmelCase_( lowercase_ : List[str] ) -> Optional[int]:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_lowerCamelCase = bytes(lowercase_ , '''utf-8''' )
with zstd.open(lowercase_ , '''wb''' ) as f:
f.write(lowercase_ )
return path
@pytest.fixture
def lowerCAmelCase_( lowercase_ : str ) -> Optional[int]:
with open(os.path.join(tmpfs.local_root_dir , lowercase_ ) , '''w''' ) as f:
f.write(lowercase_ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : str ) -> Any:
_lowerCamelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_lowerCamelCase = input_paths[compression_format]
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = DownloadConfig(cache_dir=lowercase_ , extract_compressed_file=lowercase_ )
_lowerCamelCase = cached_path(lowercase_ , download_config=lowercase_ )
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] ) -> Optional[int]:
_lowerCamelCase = '''custom_cache'''
_lowerCamelCase = '''custom_extracted_dir'''
_lowerCamelCase = tmp_path / '''custom_extracted_path'''
if default_extracted:
_lowerCamelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowercase_ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
_lowerCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase = xz_file
_lowerCamelCase = (
DownloadConfig(extract_compressed_file=lowercase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase_ )
)
_lowerCamelCase = cached_path(lowercase_ , download_config=lowercase_ )
assert Path(lowercase_ ).parent.parts[-2:] == expected
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[int]:
# absolute path
_lowerCamelCase = str(Path(lowercase_ ).resolve() )
assert cached_path(lowercase_ ) == text_file
# relative path
_lowerCamelCase = str(Path(lowercase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase_ ) == text_file
def lowerCAmelCase_( lowercase_ : str ) -> List[str]:
# absolute path
_lowerCamelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
# relative path
_lowerCamelCase = '''./__missing_file__.txt'''
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Tuple:
_lowerCamelCase = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( ) -> Optional[Any]:
with pytest.raises(lowercase_ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> int:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
http_get('''https://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
ftp_get('''ftp://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
fsspec_get('''s3://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
fsspec_head('''s3://huggingface.co''' )
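# Sketch of the API the fixtures above exercise (paths are hypothetical, and
# the archive must exist for the call to succeed): cached_path resolves a URL
# or local file into the datasets cache, and extract_compressed_file=True
# makes it return the extracted file instead of the archive itself.
def _cached_path_demo():
    download_config = DownloadConfig(cache_dir="/tmp/hf_cache", extract_compressed_file=True)
    return cached_path("/tmp/data.txt.gz", download_config=download_config)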
| 701 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> List[Any]:
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
_lowerCamelCase = getattr(lowercase_ , lowercase_ ).shape
else:
_lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
elif weight_type == "running_mean":
_lowerCamelCase = value
elif weight_type == "running_var":
_lowerCamelCase = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase = value
elif weight_type == "inv_freq":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(lowercase_ )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , lowercase_ )
if "pos_bias_u" in name:
_lowerCamelCase = None
elif "pos_bias_v" in name:
_lowerCamelCase = None
elif "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
elif "running_mean" in name:
_lowerCamelCase = '''running_mean'''
elif "inv_freq" in name:
_lowerCamelCase = '''inv_freq'''
elif "running_var" in name:
_lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
_lowerCamelCase = '''num_batches_tracked'''
else:
_lowerCamelCase = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict ) -> Union[str, Any]:
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any=None , lowercase_ : int=None , lowercase_ : Union[str, Any]=True ) -> int:
if config_path is not None:
_lowerCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase_ , hidden_act='''swish''' )
else:
_lowerCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase = '''rotary'''
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(lowercase_ , '''vocab.json''' )
if not os.path.isdir(lowercase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 0
_lowerCamelCase = 1
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = WavaVecaCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_lowerCamelCase = WavaVecaConformerForCTC(lowercase_ )
else:
_lowerCamelCase = WavaVecaConformerForPreTraining(lowercase_ )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' )
_lowerCamelCase = fairseq.tasks.setup_task(lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase_ )
_lowerCamelCase = model[0].eval()
recursively_load_weights(lowercase_ , lowercase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
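# Example invocation (assuming the script above is saved as
# convert_wav2vec2_conformer.py; all paths are hypothetical):
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --not_finetuned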
| 702 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : Optional[Any] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 623 | 0 |
"""simple docstring"""
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 703 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 0 |
"""simple docstring"""
from math import isqrt
def lowerCAmelCase_( lowercase_ : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase_ ) + 1 ) )
def lowerCAmelCase_( lowercase_ : int = 10**6 ) -> int:
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 704 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 | 0 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 705 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_( ) -> str:
raise RuntimeError('''CUDA out of memory.''' )
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = nn.Linear(3 , 4 )
_lowerCamelCase = nn.BatchNormad(4 )
_lowerCamelCase = nn.Linear(4 , 5 )
def snake_case__ ( self , lowerCamelCase__ ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowerCamelCase , _lowerCamelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase__ ):
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self ):
_lowerCamelCase = torch.cuda.memory_allocated()
_lowerCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase__ )
_lowerCamelCase = release_memory(lowerCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase__ )
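# Typical shape of the decorator exercised above (a sketch, reusing the
# import at the top of this file): on a CUDA OOM the wrapper halves
# batch_size and retries, and callers invoke the wrapped function with no
# batch_size argument, exactly as the tests do.
@find_executable_batch_size(starting_batch_size=64)
def _train_demo(batch_size):
    ...  # one training attempt at this batch size
# _train_demo()  # would start at 64 and halve on each CUDA OOM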
| 706 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch version
# makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
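# The JIT-vs-eager comparison idiom from the test above, reduced to a
# self-contained sketch:
import jax.numpy as jnp

def _jit_consistency_demo():
    @jax.jit
    def fn(x):
        return (x * 2.0).sum()

    x = jnp.arange(4.0)
    jitted = fn(x)
    with jax.disable_jit():
        eager = fn(x)
    assert jnp.allclose(jitted, eager)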
| 623 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Any = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_8_4,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_2_8,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=2_0,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=3_0,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__SCREAMING_SNAKE_CASE : Any = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : List[str] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__SCREAMING_SNAKE_CASE : List[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__SCREAMING_SNAKE_CASE : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__SCREAMING_SNAKE_CASE : str = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : Dict = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : Optional[int] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : str = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Any = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
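# Counterpart to the serialization above (a sketch using the TensorRT
# runtime API; shown as comments so the script's flow is unchanged):
#
#   with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
#       engine = runtime.deserialize_cuda_engine(f.read())
#   context = engine.create_execution_context()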
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_lowerCamelCase = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
_lowerCamelCase = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
_lowerCamelCase = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase_ )
# start time
_lowerCamelCase = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase_ ) for d_inp in d_inputs] + [int(lowercase_ ), int(lowercase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
_lowerCamelCase = time.time()
_lowerCamelCase = end_time - start_time
_lowerCamelCase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''validation'''].column_names
__SCREAMING_SNAKE_CASE : int = '''question''' if '''question''' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = '''context''' if '''context''' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Optional[Any] = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCAmelCase_( lowercase_ : Dict ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
_lowerCamelCase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=lowercase_ , stride=args.doc_stride , return_overflowing_tokens=lowercase_ , return_offsets_mapping=lowercase_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_lowerCamelCase = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_lowerCamelCase = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what the context is and what the question is).
_lowerCamelCase = tokenized_examples.sequence_ids(lowercase_ )
_lowerCamelCase = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_lowerCamelCase = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
# whether a token position is part of the context or not.
_lowerCamelCase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
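# A hedged worked example of the overflow mapping above (numbers are mine,
# purely illustrative): with max_seq_length=384 and doc_stride=128, a question
# paired with a 1,000-token context is split into several overlapping features;
# overflow_to_sample_mapping might then look like [0, 0, 0, 1, ...], i.e. the
# first three features all point back to example 0, and each feature keeps
# offset mappings only for its own context tokens so that predictions can be
# mapped back to character spans in the right example.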
__SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets['''validation''']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Optional[int] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__SCREAMING_SNAKE_CASE : Optional[int] = default_data_collator
__SCREAMING_SNAKE_CASE : Dict = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__SCREAMING_SNAKE_CASE : List[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any]="eval" ) -> Any:
'''simple docstring'''
_lowerCamelCase = postprocess_qa_predictions(
examples=lowercase_ , features=lowercase_ , predictions=lowercase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_lowerCamelCase = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
_lowerCamelCase = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
_lowerCamelCase = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase_ , label_ids=lowercase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCAmelCase_( lowercase_ : Dict ) -> Dict:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(lowercase_ ) ) * engine.get_binding_dtype(lowercase_ ).itemsize
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : Tuple = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Optional[int] = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : List[str] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : List[str] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
__SCREAMING_SNAKE_CASE : List[str] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Any = timeit.default_timer()
__SCREAMING_SNAKE_CASE : int = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE : int = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE : int = outputs
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__SCREAMING_SNAKE_CASE : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__SCREAMING_SNAKE_CASE : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : Union[str, Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Any = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : Optional[Any] = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0))
logger.info('''Total Number of Inference = %d''', niter)
__SCREAMING_SNAKE_CASE : Optional[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : Dict = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 707 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 0 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : list[int] ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
_lowerCamelCase = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase_ )
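# A hedged usage sketch (mine, not part of the original snippet): for
# [1, 2, 3, 4] the average is 2.5 and the mean absolute deviation is
# (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0, so the function returns 1.0.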
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
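# A hedged usage note (mine, not part of the original module): with the
# _LazyModule pattern above, a statement like `from transformers import
# XLMModel` resolves the name through _import_structure and only imports
# modeling_xlm (and therefore torch) at that moment, which keeps a bare
# `import transformers` cheap.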
| 623 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = '''▁'''
__SCREAMING_SNAKE_CASE : str = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
__SCREAMING_SNAKE_CASE : str = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
__SCREAMING_SNAKE_CASE : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : int = ['input_ids', 'attention_mask']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
# Mask token behaves like a normal word, i.e. includes the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
_lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase = 1
_lowerCamelCase = len(self.sp_model )
_lowerCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase__ )
}
_lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase = src_lang if src_lang is not None else '''en_XX'''
_lowerCamelCase = self.lang_code_to_id[self._src_lang]
_lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case__ ( self ):
return self._src_lang
@src_lang.setter
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
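# A hedged worked example of the offset arithmetic above (mine), following
# the alignment table in __init__: the spm model assigns "," the id 3 and
# fairseq_offset is 1, so the id returned here is 3 + 1 = 4, matching the
# id that "," has in the original fairseq vocab; an spm id of 0 means the
# piece is unknown, hence the fallback to unk_token_id.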
def snake_case__ ( self , lowerCamelCase__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
_lowerCamelCase = ''''''
_lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
_lowerCamelCase = True
_lowerCamelCase = []
else:
current_sub_tokens.append(lowerCamelCase__ )
_lowerCamelCase = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
_lowerCamelCase = [1] * len(self.prefix_tokens )
_lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCamelCase = src_lang
_lowerCamelCase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ )
_lowerCamelCase = tgt_lang_id
return inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = "en_XX" , lowerCamelCase__ = None , lowerCamelCase__ = "ro_RO" , **lowerCamelCase__ , ):
_lowerCamelCase = src_lang
_lowerCamelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[src_lang]
_lowerCamelCase = [self.cur_lang_code_id]
_lowerCamelCase = [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[tgt_lang]
_lowerCamelCase = [self.cur_lang_code_id]
_lowerCamelCase = [self.eos_token_id]
| 709 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = 'altclip_text_model'
def __init__( self , lowerCamelCase__=2_5_0_0_0_2 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_4 , lowerCamelCase__=1 , lowerCamelCase__=0.0_2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-05 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=7_6_8 , **lowerCamelCase__ , ):
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = initializer_factor
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
_lowerCamelCase = project_dim
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any = 'altclip_vision_model'
def __init__( self , lowerCamelCase__=7_6_8 , lowerCamelCase__=3_0_7_2 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=3 , lowerCamelCase__=2_2_4 , lowerCamelCase__=3_2 , lowerCamelCase__="quick_gelu" , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1.0 , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = hidden_size
_lowerCamelCase = intermediate_size
_lowerCamelCase = projection_dim
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = num_channels
_lowerCamelCase = patch_size
_lowerCamelCase = image_size
_lowerCamelCase = initializer_range
_lowerCamelCase = initializer_factor
_lowerCamelCase = attention_dropout
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = hidden_act
@classmethod
def snake_case__ ( cls , lowerCamelCase__ , **lowerCamelCase__ ):
cls._set_token_in_kwargs(lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
_lowerCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[int] = 'altclip'
lowercase__ : str = True
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=7_6_8 , lowerCamelCase__=2.6_5_9_2 , **lowerCamelCase__ ):
# If `_config_dict` exists, we use it for backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
_lowerCamelCase = kwargs.pop('''text_config_dict''' , lowerCamelCase__ )
_lowerCamelCase = kwargs.pop('''vision_config_dict''' , lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_lowerCamelCase = {}
# This is the complete result when using `text_config_dict`.
_lowerCamelCase = AltCLIPTextConfig(**lowerCamelCase__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_lowerCamelCase = (
F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
F"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_lowerCamelCase = (
F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
F"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(lowerCamelCase__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_lowerCamelCase = {}
# This is the complete result when using `vision_config_dict`.
_lowerCamelCase = AltCLIPVisionConfig(**lowerCamelCase__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_lowerCamelCase = {
str(lowerCamelCase__ ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_lowerCamelCase = (
F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_lowerCamelCase = (
F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
F"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(lowerCamelCase__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_lowerCamelCase = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
_lowerCamelCase = {}
logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''' )
_lowerCamelCase = AltCLIPTextConfig(**lowerCamelCase__ )
_lowerCamelCase = AltCLIPVisionConfig(**lowerCamelCase__ )
_lowerCamelCase = projection_dim
_lowerCamelCase = logit_scale_init_value
_lowerCamelCase = 1.0
@classmethod
def snake_case__ ( cls , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = copy.deepcopy(self.__dict__ )
_lowerCamelCase = self.text_config.to_dict()
_lowerCamelCase = self.vision_config.to_dict()
_lowerCamelCase = self.__class__.model_type
return output
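# A hedged usage note (mine, not from this file): in the upstream transformers
# library these three classes are AltCLIPTextConfig, AltCLIPVisionConfig and
# AltCLIPConfig, and the final classmethod above corresponds to
# AltCLIPConfig.from_text_vision_configs(text_config, vision_config), which
# round-trips both sub-configs through to_dict() before composing them.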
| 710 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> bool:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCamelCase = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCamelCase = True
if a[i].islower():
_lowerCamelCase = True
return dp[n][m]
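# A hedged note on the DP above (example mine, not from the snippet): dp[i][j]
# is True when the first i characters of `a` can be turned into the first j
# characters of `b` by upper-casing some lowercase letters and deleting the
# remaining lowercase ones; this is the classic "abbreviation" problem. For
# instance, a='''daBcd''' and b='''ABC''' yield True (capitalize 'a' and 'c',
# delete both 'd's).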
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
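# A hedged note (mine, not from the snippet): the second function computes
# x * sigmoid(1.702 * x), the sigmoid-based approximation of GELU (a scaled
# SiLU); at x = 0 it evaluates to 0 * 0.5 = 0, and for large positive x it
# approaches x, matching GELU's behaviour.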
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int = 10 , lowercase_ : int = 22 ) -> int:
_lowerCamelCase = range(1 , lowercase_ )
_lowerCamelCase = range(1 , lowercase_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
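# A hedged worked example (mine): 7**5 = 16807 has exactly 5 digits, so the
# pair (base=7, power=5) is counted, while any base of 10 or more can never
# qualify because base**power then has at least power + 1 digits. This mirrors
# Project Euler problem 63, restricted to the given base/power ranges.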
if __name__ == "__main__":
print(F"""{solution(1_0, 2_2) = }""")
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
"""simple docstring"""
import math
def lowerCAmelCase_( lowercase_ : int = 1_00 ) -> int:
_lowerCamelCase = sum(i * i for i in range(1 , n + 1 ) )
_lowerCamelCase = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
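# A hedged worked example (mine): for n = 10 the sum of squares is 385, the
# square of the sum is 55**2 = 3025, and the difference is 3025 - 385 = 2640.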
if __name__ == "__main__":
print(F"""{solution() = }""") | 713 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
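# A hedged worked example (mine): a 3-ohm resistance with a 4-ohm reactance
# gives an impedance of sqrt(3**2 + 4**2) = 5, so calling the function with
# (3, 4, 0) returns {"impedance": 5.0}.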
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
import operator as op
def lowerCAmelCase_( lowercase_ : Dict ) -> Dict:
_lowerCamelCase = []
_lowerCamelCase = lambda lowercase_ , lowercase_ : int(x / y ) # noqa: E731 integer division operation
_lowerCamelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(lowercase_ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowercase_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
else:
_lowerCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
_lowerCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(lowercase_ ) , int(lowercase_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' , )
return int(stack[0] )
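# A hedged worked example (mine): feeding ['5', '6', '9', '*', '+'] to the
# function above evaluates "5 + 6 * 9" in postfix form and returns 59,
# printing the push/pop trace table along the way.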
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 714 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
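# A hedged note on the recursion above (mine): at each index the element is
# first excluded (the first recursive call) and then included (append,
# recurse, pop), so a sequence of length n prints all 2**n subsequences,
# including the empty one.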
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 623 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__SCREAMING_SNAKE_CASE : List[Any] = logging.getLogger(__name__)
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Dict ) -> List[str]:
# save results
if os.path.exists(lowercase_ ):
if os.path.exists(os.path.join(lowercase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(lowercase_ , '''config.json''' ) ):
os.remove(os.path.join(lowercase_ , '''config.json''' ) )
if os.path.exists(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowercase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(lowercase_ )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : int=False ) -> Optional[int]:
_lowerCamelCase = 2
if unlogit:
_lowerCamelCase = torch.pow(lowercase_ , lowercase_ )
_lowerCamelCase = p * torch.log(lowercase_ )
_lowerCamelCase = 0
return -plogp.sum(dim=-1 )
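# A hedged sanity check for the entropy above (mine): for a uniform
# distribution over k outcomes the result is log(k); e.g. for
# torch.full((4,), 0.25) it is -4 * 0.25 * log(0.25) = log(4) ~ 1.386 nats.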
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> List[Any]:
logger.info('''lv, h >\t''' + '''\t'''.join(F"""{x + 1}""" for x in range(len(lowercase_ ) ) ) )
for row in range(len(lowercase_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=None , lowercase_ : Dict=False ) -> Union[str, Any]:
_lowerCamelCase , _lowerCamelCase = model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCamelCase = torch.zeros(lowercase_ , lowercase_ ).to(args.device )
_lowerCamelCase = torch.zeros(lowercase_ , lowercase_ ).to(args.device )
if head_mask is None:
_lowerCamelCase = torch.ones(lowercase_ , lowercase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowercase_ )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_lowerCamelCase = None
_lowerCamelCase = 0.0
_lowerCamelCase = 0.0
for step, inputs in enumerate(tqdm(lowercase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_lowerCamelCase = tuple(t.to(args.device ) for t in inputs )
((_lowerCamelCase ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCamelCase = model(lowercase_ , labels=lowercase_ , head_mask=lowercase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowercase_ ):
_lowerCamelCase = entropy(attn.detach() , lowercase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowercase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCamelCase = 2
_lowerCamelCase = torch.pow(torch.pow(lowercase_ , lowercase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_lowerCamelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowercase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowercase_ )
logger.info('''Head ranked by importance scores''' )
_lowerCamelCase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCamelCase = torch.arange(
head_importance.numel() , device=args.device )
_lowerCamelCase = head_ranks.view_as(lowercase_ )
print_ad_tensor(lowercase_ )
return attn_entropy, head_importance, total_loss
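
# Illustrative aside (not part of the original script): the importance score above
# accumulates |d loss / d head_mask|, a first-order estimate of how much the loss
# would move if a head were masked, following Michel et al. (2019). A toy scalar
# analogue of the gradient-through-a-mask trick:
#   m = torch.ones(1, requires_grad=True)
#   (3.0 * m).sum().backward()
#   m.grad.abs()  # -> tensor([3.])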
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) iteratively, based on the head importance scores,
    until the score drops below the masking threshold (Michel et al., http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and compare scores and timings."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and 0.0 < args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
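# Example invocation (illustrative only; the script name and file paths are assumptions):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9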
| 715 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
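
# Preferred modern import, per the warning above:
#   from transformers import TFGenerationMixin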
| 623 | 0 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
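
# Illustrative usage sketch (not part of the original module): these helpers are
# normally driven by `Accelerator.save_state` / `Accelerator.load_state`; called
# directly they pair up as follows (`fsdp_plugin` comes from `accelerator.state.fsdp_plugin`).
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...later...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")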
| 716 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
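
# Illustrative aside (hypothetical file contents): the entity vocab is expected to
# be a TSV with one "<title>\t<count>" pair per line; load_entity_vocab maps each
# title to its zero-based line index, e.g.
#   [PAD]\t0
#   [UNK]\t0
#   Ana Ivanovic\t42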
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Swin2SR model."""

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
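
# Illustrative usage sketch (assumes a `transformers` version with Swin2SR support;
# not part of the original module):
#   from transformers import Swin2SRConfig, Swin2SRModel
#   config = Swin2SRConfig(upscale=4)   # 4x super-resolution variant
#   model = Swin2SRModel(config)        # randomly initialized model from the config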
| 717 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
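    # Worked example with the defaults above: image_size=30 and patch_size=2 give
    # (30 // 2) ** 2 = 225 patches, so with mask_ratio=0.6 the expected unmasked
    # length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens (the +1 is [CLS]).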
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
    reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.'''
)
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 0 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """
    Implements the tanh function, mathematically 2 / (1 + e^(-2x)) - 1.

    >>> tangent_hyperbolic(np.array([1, 5, 6, -0.67]))
    array([ 0.76159416,  0.9999092 ,  0.99998771, -0.58497988])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
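
# Illustrative aside (not part of the original file): the derivative used in
# backpropagation follows from the identity tanh'(x) = 1 - tanh(x) ** 2.
def tangent_hyperbolic_derivative(vector: np.array) -> np.array:
    # Reuses the forward function defined above.
    return 1 - tangent_hyperbolic(vector) ** 2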
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
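
# Worked example of the discount formula above (illustrative numbers): an MRP of
# Rs 50,000 and a current price of Rs 40,000 give (50000 - 40000) / 50000 * 100 = 20.0.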
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 0 |
"""simple docstring"""
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 719 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that concatenates tokenized texts (separated by the BOS
    token) and yields fixed-length chunks of `seq_length` token ids."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Number of raw characters to buffer before tokenizing a batch of texts.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    # Relies on the module-level `tokenizer` defined below before this is called.
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
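
# Worked example of the perplexity above: a mean cross-entropy loss of 2.0 nats
# corresponds to perplexity exp(2.0) ~= 7.39, i.e. the model is on average about as
# uncertain as a uniform choice over 7.4 tokens.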
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 623 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=2 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=None , lowerCamelCase__=2 , lowerCamelCase__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = patch_size
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase = frequency_out_dimension * time_out_dimension
_lowerCamelCase = num_patches + 2
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, input_values, labels
def snake_case__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ):
    config_and_inputs = self.prepare_config_and_inputs()
    config, input_values, labels = config_and_inputs
    inputs_dict = {"input_values": input_values}
    return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Dict = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase__ : str = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowercase__ : str = False
lowercase__ : List[str] = False
lowercase__ : List[str] = False
lowercase__ : Dict = False
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self ):
_lowerCamelCase = ASTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase = prepare_audio()
_lowerCamelCase = audio.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 720 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Convergence is reached when we exceed max_iterations
# or when the change from one iteration to the next is small.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find the Rayleigh quotient
# (faster than usual because the vector is already normalized)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
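# Illustrative check (not part of the original module; expected values assumed
# from the algorithm's behavior): the dominant eigenvalue of the symmetric
# matrix [[2, 1], [1, 2]] is 3, with eigenvector proportional to [1, 1].
#
#     eigen_value, eigen_vector = power_iteration(
#         np.array([[2.0, 1.0], [1.0, 2.0]] ), np.array([1.0, 0.0] ) )
#     assert abs(eigen_value - 3.0 ) < 1e-6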
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (used for symmetric or Hermitian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# The last eigenvalue is the maximum one (eigh returns eigenvalues in ascending order).
_lowerCamelCase = eigen_values[-1]
# The last column of this matrix is the eigenvector corresponding to the largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check that our implementation and numpy give close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take element-wise absolute values of each eigenvector,
# as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase__ : Optional[str] = field(
default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase__ : bool = field(default=A__, metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'}, )
lowercase__ : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowercase__ : bool = field(
default=A__, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCAmelCase_( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
_lowerCamelCase = import_module('''tasks''' )
try:
_lowerCamelCase = getattr(lowercase_ , model_args.task_type )
_lowerCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Prepare the CoNLL-2003 task
_lowerCamelCase = token_classification_task.get_labels(data_args.labels )
_lowerCamelCase = dict(enumerate(lowercase_ ) )
_lowerCamelCase = len(lowercase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase_ , idalabel=lowercase_ , labelaid={label: i for i, label in enumerate(lowercase_ )} , cache_dir=model_args.cache_dir , )
_lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_lowerCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowercase_ : np.ndarray , lowercase_ : np.ndarray ) -> Tuple[List[int], List[int]]:
_lowerCamelCase = np.argmax(lowercase_ , axis=2 )
_lowerCamelCase , _lowerCamelCase = preds.shape
_lowerCamelCase = [[] for _ in range(lowercase_ )]
_lowerCamelCase = [[] for _ in range(lowercase_ )]
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(lowercase_ : EvalPrediction ) -> Dict:
_lowerCamelCase , _lowerCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
# Data collator
_lowerCamelCase = DataCollatorWithPadding(lowercase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_lowerCamelCase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , compute_metrics=lowercase_ , data_collator=lowercase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_lowerCamelCase = trainer.evaluate()
_lowerCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , lowercase_ , lowercase_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(lowercase_ )
# Predict
if training_args.do_predict:
_lowerCamelCase = TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = trainer.predict(lowercase_ )
_lowerCamelCase , _lowerCamelCase = align_predictions(lowercase_ , lowercase_ )
_lowerCamelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , lowercase_ , lowercase_ )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
_lowerCamelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(lowercase_ , lowercase_ , lowercase_ )
return results
def lowerCAmelCase_( lowercase_ : Dict ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
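# Illustrative invocation (flag names taken from the dataclasses above; the
# script name and paths are hypothetical):
#
#     python run_ner.py --model_name_or_path bert-base-cased \
#         --data_dir ./conll2003 --labels ./labels.txt \
#         --output_dir ./ner-output --do_train --do_eval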
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
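# Note: with the _LazyModule pattern above, the submodules listed in
# _import_structure are only imported on first attribute access, so importing
# this package stays cheap until one of the classes is actually used.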
| 623 | 0 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int , lowercase_ : int ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_lowerCamelCase = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
_lowerCamelCase = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
_lowerCamelCase = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
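# Illustrative example (assumed from the construction above): for a = 25
# (0b11001) and b = 32 (0b100000), the zero-padded bitwise AND is "0b000000".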
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
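# Each pass above applies the Koch rule: a segment [start, end] becomes four
# segments via the one-third point, a peak (the one-third offset rotated by
# +60 degrees), and the two-thirds point, quadrupling the segment count per
# iteration.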
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str] = 'roberta-prelayernorm'
def __init__( self , lowerCamelCase__=5_0_2_6_5 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=3_0_7_2 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-12 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
_lowerCamelCase = classifier_dropout
class lowerCamelCase_( A__ ):
'''simple docstring'''
@property
def snake_case__ ( self ):
if self.task == "multiple-choice":
_lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 701 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
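# Note: swap_nodes exchanges the payloads of the two matching nodes rather
# than re-linking them, so the list structure itself is never modified.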
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 0 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__SCREAMING_SNAKE_CASE : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def lowerCAmelCase_( lowercase_ : dict[int, list[int]] , lowercase_ : int , lowercase_ : list[bool] ) -> list[int]:
_lowerCamelCase = True
_lowerCamelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowercase_ , lowercase_ , lowercase_ )
order.append(lowercase_ )
return order
def lowerCAmelCase_( lowercase_ : dict[int, list[int]] , lowercase_ : int , lowercase_ : list[bool] ) -> list[int]:
_lowerCamelCase = True
_lowerCamelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowercase_ , lowercase_ , lowercase_ )
return component
def lowerCAmelCase_( lowercase_ : dict[int, list[int]] ) -> list[list[int]]:
_lowerCamelCase = len(lowercase_ ) * [False]
_lowerCamelCase = {vert: [] for vert in range(len(lowercase_ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowercase_ )
_lowerCamelCase = []
for i, was_visited in enumerate(lowercase_ ):
if not was_visited:
order += topology_sort(lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = []
_lowerCamelCase = len(lowercase_ ) * [False]
for i in range(len(lowercase_ ) ):
_lowerCamelCase = order[len(lowercase_ ) - i - 1]
if not visited[vert]:
_lowerCamelCase = find_components(lowercase_ , lowercase_ , lowercase_ )
components_list.append(lowercase_ )
return components_list
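# Illustrative result (assumed behavior of Kosaraju's algorithm): in the first
# sample graph above, vertices {0, 1, 2} form one strongly connected
# component, while {3} and {4} are singletons.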
| 702 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : Optional[Any] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 623 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE : Any = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''camembert-base''': 5_1_2,
}
__SCREAMING_SNAKE_CASE : List[Any] = '''▁'''
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = ['input_ids', 'attention_mask']
lowercase__ : Tuple = CamembertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCamelCase__ , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = False if not self.vocab_file else True
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
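# For a sequence pair, the layout returned above is
# <s> A </s></s> B </s>, matching the RoBERTa/CamemBERT convention.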
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
| 703 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = 'mgp-str'
def __init__( self , lowerCamelCase__=[3_2, 1_2_8] , lowerCamelCase__=4 , lowerCamelCase__=3 , lowerCamelCase__=2_7 , lowerCamelCase__=3_8 , lowerCamelCase__=5_0_2_5_7 , lowerCamelCase__=3_0_5_2_2 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=4.0 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = max_token_length
_lowerCamelCase = num_character_labels
_lowerCamelCase = num_bpe_labels
_lowerCamelCase = num_wordpiece_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = mlp_ratio
_lowerCamelCase = distilled
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = drop_rate
_lowerCamelCase = qkv_bias
_lowerCamelCase = attn_drop_rate
_lowerCamelCase = drop_path_rate
_lowerCamelCase = output_aa_attentions
_lowerCamelCase = initializer_range
| 704 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create a hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
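# The allclose check above verifies that one decoding step with cached
# past_key_values matches the corresponding slice of a full forward pass over
# the concatenated sequence.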
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCAmelCase_( lowercase_ : int ) -> str:
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
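# e.g. (illustrative): ['--num_proc', '8', '--debug', 'true'] becomes
# {'num_proc': '8', 'debug': 'true'}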
def lowerCAmelCase_( ) -> Tuple:
_lowerCamelCase = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=lowercase_ )
_lowerCamelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowercase_ )
EnvironmentCommand.register_subcommand(lowercase_ )
TestCommand.register_subcommand(lowercase_ )
RunBeamCommand.register_subcommand(lowercase_ )
DummyDataCommand.register_subcommand(lowercase_ )
# Parse args
_lowerCamelCase , _lowerCamelCase = parser.parse_known_args()
if not hasattr(lowercase_ , '''func''' ):
parser.print_help()
exit(1 )
_lowerCamelCase = parse_unknown_args(lowercase_ )
# Run
_lowerCamelCase = args.func(lowercase_ , **lowercase_ )
service.run()
if __name__ == "__main__":
main()
| 705 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = BartphoTokenizer
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = True
def snake_case__ ( self ):
super().setUp()
_lowerCamelCase = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = {'''unk_token''': '''<unk>'''}
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
_lowerCamelCase = BartphoTokenizer(lowerCamelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self , **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = '''This is a là test'''
_lowerCamelCase = '''This is a<unk><unk> test'''
return input_text, output_text
def snake_case__ ( self ):
_lowerCamelCase = BartphoTokenizer(lowerCamelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
_lowerCamelCase = '''This is a là test'''
_lowerCamelCase = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
_lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokens + [tokenizer.unk_token]
_lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
| 706 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
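# Note: the JIT vs. eager comparison above asserts matching output shapes
# only; output values are not compared here.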
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version,
# an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
| 623 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = '''▁'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__SCREAMING_SNAKE_CASE : str = {
'''facebook/xglm-564M''': 2_0_4_8,
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_lowerCamelCase = 7
_lowerCamelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_lowerCamelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
_lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
_lowerCamelCase = len(self.sp_model )
_lowerCamelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCamelCase__ )
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
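# With fairseq_offset = 1, a sentencepiece piece id k is exposed as
# vocabulary id k + 1, reproducing the fairseq alignment shown in the table
# above; the four re-mapped special tokens are looked up first.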
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
_lowerCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_lowerCamelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ ))
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ ))
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case__ ( self ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self , lowerCamelCase__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 707 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
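# A minimal sketch (not part of the test suite) of the prompt-embedding path the
# test above exercises, assuming `pipe` is a StableDiffusionXLImgaImgPipeline built
# from the dummy components and `image`/`prompt` come from get_dummy_inputs:
#
#   prompt_embeds, negative_embeds, pooled, negative_pooled = pipe.encode_prompt(
#       prompt, negative_prompt=negative_prompt
#   )
#   images = pipe(
#       image=image, strength=0.75, num_inference_steps=2,
#       prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds,
#       pooled_prompt_embeds=pooled, negative_pooled_prompt_embeds=negative_pooled,
#   ).images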
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = F"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    # Map Open Library keys onto human-readable report labels.
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
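# Example flow (network access required); the values shown are what Open Library
# returned for the default olid at the time of writing, so treat them as a sketch:
#
#   book = summarize_book(get_openlibrary_data('''isbn/0140328726'''))
#   book['''Title''']    # -> 'Matilda'
#   book['''Authors''']  # -> 'Roald Dahl' (lists are joined into strings above)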
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
            continue
        print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
        try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
            print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
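# What the lazy structure above buys the caller, as a sketch (assuming this file is
# the __init__ of the XLM subpackage inside an installed `transformers`): importing
# a config class never pulls in torch or TF, while model classes trigger the
# backend-specific module import only on first attribute access:
#
#   from transformers import XLMConfig   # cheap, no framework import
#   from transformers import XLMModel    # torch-backed module is imported lazily here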
| 623 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=1_6 , lowerCamelCase__=3_6 , lowerCamelCase__=6 , lowerCamelCase__=6 , lowerCamelCase__=6 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = embedding_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_hidden_groups
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = AlbertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = AlbertForPreTraining(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , sentence_order_label=lowerCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = AlbertForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = AlbertForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = AlbertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = AlbertForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_choices
_lowerCamelCase = AlbertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase__ : int = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : str = True
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
_lowerCamelCase = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ )
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def snake_case__ ( self ):
_lowerCamelCase = AlbertModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = AlbertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = AlbertModel.from_pretrained('''albert-base-v2''' )
_lowerCamelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_lowerCamelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1e-4 ) )
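# A standalone inference sketch mirroring the integration test above (the
# checkpoint name and the 768 hidden size are taken from that test):
#
#   model = AlbertModel.from_pretrained('''albert-base-v2''')
#   with torch.no_grad():
#       output = model(input_ids, attention_mask=attention_mask)[0]
#   # output.shape -> torch.Size([batch_size, seq_length, 768])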
| 709 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
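# Standalone usage sketch (assumes 16 kHz mono audio, matching the dummy dataset
# used above; the (1, 80, 3000) log-mel shape comes from the integration test):
#
#   feature_extractor = WhisperFeatureExtractor()
#   input_features = feature_extractor(audio, sampling_rate=16_000, return_tensors='''pt''').input_features
#   # input_features.shape -> torch.Size([1, 80, 3000])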
| 623 | 0 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    # Build the search URL and send a browser-like user agent so Amazon serves HTML.
    url = F"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('''nan''' )
        except AttributeError:
            # Skip entries that are missing the expected markup entirely.
            continue
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Use a blank placeholder for cells the parsing above left empty.
    data_frame.loc[data_frame['''Current Price of the product'''] == '''''', '''Current Price of the product'''] = ''' '''
    data_frame.loc[data_frame['''MRP of the product'''] == '''''', '''MRP of the product'''] = ''' '''
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = '''headphones'''
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 710 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Checks whether string `a` can be abbreviated to string `b` by capitalizing
    some of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
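# Reading the table as a sketch: dp[i][j] is True when the first i characters of
# `a` can be turned into the first j characters of `b`. From a reachable dp[i][j]
# we either capitalize a[i] when a[i].upper() == b[j] (advancing both indices) or
# delete a[i] when it is lowercase (advancing i only). Uppercase letters in `a`
# can never be deleted, which is why dp[i + 1][j] is only set for lowercase a[i].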
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 623 | 0 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() )  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = nn.Parameter(torch.randn(1 ) )
_lowerCamelCase = nn.Parameter(torch.randn(1 ) )
def snake_case__ ( self , lowerCamelCase__ ):
return x * self.a + self.b
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
# Train baseline
_lowerCamelCase = Accelerator()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''initial''' )
accelerator.save_state(lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
_lowerCamelCase = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = Accelerator()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''checkpoint''' )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
_lowerCamelCase = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = torch.tensor([1, 2, 3] )
_lowerCamelCase = torch.tensor([2, 3, 4] )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(net.parameters() )
_lowerCamelCase = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.9_9 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
_lowerCamelCase = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase = accelerator.prepare(lowerCamelCase__ )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def snake_case__ ( self ):
_lowerCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''/tmp/accelerate/state_checkpointing'''
__SCREAMING_SNAKE_CASE : str = DummyModel()
__SCREAMING_SNAKE_CASE : int = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__SCREAMING_SNAKE_CASE : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__SCREAMING_SNAKE_CASE : List[Any] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__SCREAMING_SNAKE_CASE : str = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__SCREAMING_SNAKE_CASE : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Optional[Any] = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__SCREAMING_SNAKE_CASE : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Tuple = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Optional[int] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
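# Minimal checkpointing sketch distilled from the tests above (the checkpoint path
# follows the automatic naming scheme those tests exercise; the /tmp directory is
# only an illustration):
#
#   model = DummyModel()
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   accelerator = Accelerator(project_dir='''/tmp/ckpts''',
#                             project_config=ProjectConfiguration(automatic_checkpoint_naming=True))
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()                                           # -> /tmp/ckpts/checkpoints/checkpoint_0
#   accelerator.load_state('''/tmp/ckpts/checkpoints/checkpoint_0''')  # restores model/optimizer/RNG state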
| 711 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    # Standard logistic function, applied elementwise.
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    # SiLU / swish with beta = 1.702, which closely approximates GELU.
    return vector * sigmoid(1.702 * vector)
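# Example values (rounded), as a quick sanity check:
#   sigmoid(np.array([-1.0, 0.0, 1.0]))              # -> [0.2689, 0.5, 0.7311]
#   sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))  # -> [-0.1542, 0.0, 0.8458], close to GELU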
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 623 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__SCREAMING_SNAKE_CASE : Optional[Any] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=9_9 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0.0_2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = eos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = initializer_range
def snake_case__ ( self ):
_lowerCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowerCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowerCamelCase = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
_lowerCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , )
_lowerCamelCase = prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = 2_0
_lowerCamelCase = model_class_name(lowerCamelCase__ )
_lowerCamelCase = model.encode(inputs_dict['''input_ids'''] )
_lowerCamelCase , _lowerCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
_lowerCamelCase = model.decode(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = 2_0
_lowerCamelCase = model_class_name(lowerCamelCase__ )
_lowerCamelCase = model.encode(inputs_dict['''input_ids'''] )
_lowerCamelCase , _lowerCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_lowerCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_lowerCamelCase = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = 99
def snake_case__ ( self ):
_lowerCamelCase = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowerCamelCase = input_ids.shape[0]
_lowerCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._get_config_and_data()
_lowerCamelCase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
_lowerCamelCase = lm_model(input_ids=lowerCamelCase__ )
_lowerCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowerCamelCase = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase__ )
_lowerCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowerCamelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowerCamelCase = lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
_lowerCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowerCamelCase = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
_lowerCamelCase = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
_lowerCamelCase = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_( A__, unittest.TestCase, A__ ):
'''simple docstring'''
lowercase__ : Optional[int] = True
lowercase__ : Optional[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase__ : str = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self ):
_lowerCamelCase = FlaxBlenderbotSmallModelTester(self )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_lowerCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
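# --- Editor's note (illustrative) ---
# Both JIT tests above follow the same parity pattern: trace the function once
# with jax.jit, re-run it eagerly under jax.disable_jit(), and compare the
# output shapes. Standalone version with a toy function:
#     @jax.jit
#     def double(x):
#         return x * 2
#     jitted = double(jnp.ones((2, 3)))
#     with jax.disable_jit():
#         eager = double(jnp.ones((2, 3)))
#     assert jitted.shape == eager.shape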
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCamelCase = np.ones((1, 1) ) * model.config.eos_token_id
_lowerCamelCase = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
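# Worked example with this tester's defaults (image_size=30, patch_size=2,
# mask_ratio=0.6): num_patches = (30 // 2) ** 2 = 225 and
# seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.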
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) | 713 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
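# --- Editor's usage sketch (argument names taken from the function body) ---
# Solving Z = sqrt(R^2 + X^2) for whichever quantity is passed as 0:
#   resistance=3, reactance=4, impedance=0  ->  {'impedance': 5.0}
#   resistance=0, reactance=4, impedance=5  ->  {'resistance': 3.0}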
| 623 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase_( metaclass=A__ ):
'''simple docstring'''
lowercase__ : Optional[int] = ['transformers', 'torch', 'note_seq']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def snake_case__ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def snake_case__ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 714 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
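# --- Editor's note ---
# For [3, 1, 2, 4] the state-space tree above prints all 2**4 = 16
# subsequences, from [] up to [3, 1, 2, 4]: each index is either skipped
# (recurse first) or appended, and pop() undoes the choice on backtrack.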
| 623 | 0 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
def lowerCAmelCase_( lowercase_ : float ) -> str:
assert type(lowercase_ ) in (int, float) and decimal == int(lowercase_ )
_lowerCamelCase = int(lowercase_ )
_lowerCamelCase = ''''''
_lowerCamelCase = False
if decimal < 0:
_lowerCamelCase = True
decimal *= -1
while decimal > 0:
_lowerCamelCase , _lowerCamelCase = divmod(lowercase_ , 16 )
_lowerCamelCase = values[remainder] + hexadecimal
_lowerCamelCase = '''0x''' + hexadecimal
if negative:
_lowerCamelCase = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
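# --- Editor's worked example ---
# Converting -256 walks divmod(decimal, 16) until the quotient is 0:
#   256 = 16*16 + 0  -> '0'
#    16 = 16*1  + 0  -> '00'
#     1 = 16*0  + 1  -> '100'
# giving '0x100'; the sign flag then re-applies the minus: '-0x100'.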
| 715 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 716 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
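# (Editor's note: the two new special-token rows are seeded from the
# embeddings of '@' and '#', the characters LUKE's downstream tasks use to
# mark entity spans in the input text.)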
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
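# --- Editor's sketch of the entity_vocab.tsv layout the loader above assumes ---
# Each line is "<entity title>\t<count>" and the 0-based line index becomes
# the entity id, e.g. (entries illustrative):
#   [PAD]\t0   -> entity_vocab['[PAD]'] == 0
#   [UNK]\t0   -> entity_vocab['[UNK]'] == 1
#   [MASK]\t0  -> entity_vocab['[MASK]'] == 2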
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : int = 'sew-d'
def __init__( self , lowerCamelCase__=3_2 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=1_2 , lowerCamelCase__=3_0_7_2 , lowerCamelCase__=2 , lowerCamelCase__=5_1_2 , lowerCamelCase__=2_5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=("p2c", "c2p") , lowerCamelCase__="layer_norm" , lowerCamelCase__="gelu_python" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-7 , lowerCamelCase__=1e-5 , lowerCamelCase__="group" , lowerCamelCase__="gelu" , lowerCamelCase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__=False , lowerCamelCase__=1_2_8 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=0.0_5 , lowerCamelCase__=1_0 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=1_0 , lowerCamelCase__=0 , lowerCamelCase__="mean" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=2_5_6 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
_lowerCamelCase = hidden_size
_lowerCamelCase = feat_extract_norm
_lowerCamelCase = feat_extract_activation
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = conv_bias
_lowerCamelCase = num_conv_pos_embeddings
_lowerCamelCase = num_conv_pos_embedding_groups
_lowerCamelCase = len(self.conv_dim )
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = intermediate_size
_lowerCamelCase = squeeze_factor
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = position_buckets
_lowerCamelCase = share_att_key
_lowerCamelCase = relative_attention
_lowerCamelCase = norm_rel_ebd
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = hidden_act
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = feat_proj_dropout
_lowerCamelCase = final_dropout
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = feature_layer_norm_eps
_lowerCamelCase = initializer_range
_lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase = apply_spec_augment
_lowerCamelCase = mask_time_prob
_lowerCamelCase = mask_time_length
_lowerCamelCase = mask_time_min_masks
_lowerCamelCase = mask_feature_prob
_lowerCamelCase = mask_feature_length
_lowerCamelCase = mask_feature_min_masks
# ctc loss
_lowerCamelCase = ctc_loss_reduction
_lowerCamelCase = ctc_zero_infinity
# sequence classification
_lowerCamelCase = use_weighted_layer_sum
_lowerCamelCase = classifier_proj_size
@property
def snake_case__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
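# Worked example for the property above: with the default
# conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the reduce computes
# 5 * 2**6 = 320, i.e. the feature extractor emits one frame per 320 input
# samples.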
| 717 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : int = 'speech_to_text'
lowercase__ : List[Any] = ['past_key_values']
lowercase__ : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowerCamelCase__=1_0_0_0_0 , lowerCamelCase__=1_2 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=2_5_6 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__=6_0_0_0 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=2 , lowerCamelCase__=(5, 5) , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=8_0 , lowerCamelCase__=1 , **lowerCamelCase__ , ):
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = encoder_layers
_lowerCamelCase = encoder_attention_heads
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = activation_function
_lowerCamelCase = init_std
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = decoder_layerdrop
_lowerCamelCase = use_cache
_lowerCamelCase = encoder_layers
_lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase = max_source_positions
_lowerCamelCase = max_target_positions
_lowerCamelCase = num_conv_layers
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = conv_channels
_lowerCamelCase = input_feat_per_channel
_lowerCamelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
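# --- Editor's usage note (class name obfuscated above; upstream it is
# Speech2TextConfig) ---
# The length check ties the two conv arguments together: the defaults
# num_conv_layers=2 and conv_kernel_sizes=(5, 5) match, whereas passing
# conv_kernel_sizes=(5,) with num_conv_layers=2 raises the ValueError.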
| 718 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
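# --- Editor's worked example of the discount formula above ---
# MRP ₹1,000 and price ₹750 give (1000 - 750) / 1000 * 100 = 25.0, i.e. a
# 25% discount; a missing or malformed MRP falls through to float('nan').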
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def lowerCAmelCase_( lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_lowerCamelCase = [1_44, 1_92, 2_40]
_lowerCamelCase = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
_lowerCamelCase = [96, 1_20, 1_44]
_lowerCamelCase = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
_lowerCamelCase = [64, 80, 96]
_lowerCamelCase = [16, 16, 24, 48, 64, 80, 3_20]
_lowerCamelCase = 0.0_5
_lowerCamelCase = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
_lowerCamelCase = 5_12
_lowerCamelCase = 16
_lowerCamelCase = 21
_lowerCamelCase = '''pascal-voc-id2label.json'''
else:
_lowerCamelCase = 10_00
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
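# Quick illustration (added): rename_key maps an original MLCVNets-style key
# onto the HF MobileViT layout; the sample key below is illustrative.
assert rename_key("conv_1.block.conv.weight") == "mobilevit.conv_stem.convolution.weight"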
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
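# Illustration (added): the qkv branch above slices a fused projection of
# height 3 * dim into query/key/value blocks. A tiny self-contained check
# with an illustrative dim of 4:
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
assert torch.equal(torch.cat([_qkv[:_dim], _qkv[_dim : _dim * 2], _qkv[-_dim:]]), _qkv)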
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
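    # Example invocation (added; the script name and all paths are placeholders):
    #   python convert_mobilevit_checkpoint.py --mobilevit_name mobilevit_s \
    #       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small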
| 719 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
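# Toy illustration (added): the packing scheme above concatenates tokenized
# examples with a separator id and emits only full fixed-length windows. The
# separator id -1 and seq_length 4 below are illustrative values.
_sep, _seq_len = -1, 4
_all_ids = []
for _ids in [[1, 2], [3, 4, 5]]:
    _all_ids.extend(_ids + [_sep])
_windows = [_all_ids[i : i + _seq_len] for i in range(0, len(_all_ids), _seq_len)]
assert [w for w in _windows if len(w) == _seq_len] == [[1, 2, -1, 3]]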
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
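# Added note: perplexity here is exp(mean token-level cross-entropy). A tiny
# numeric sanity check of that identity:
import math

assert abs(math.exp(0.0) - 1.0) < 1e-12  # zero loss -> perplexity 1
assert abs(math.exp(math.log(10.0)) - 10.0) < 1e-9  # loss ln(10) -> perplexity 10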
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 623 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
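    # Cross-check (added): a memoised recursion over the same choices -- place a
    # grey unit square or a coloured tile of length 2, 3 or 4 at the left end.
    # `ways_recursive` is an illustrative helper, not part of the original.
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def ways_recursive(n: int) -> int:
        if n < 0:
            return 0
        if n == 0:
            return 1
        return sum(ways_recursive(n - k) for k in (1, 2, 3, 4))

    assert all(solution(n) == ways_recursive(n) for n in range(25))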
| 720 |
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
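    # Minimal extra check (added): dominant eigenpair of a tiny symmetric
    # matrix; the eigenvalues of [[2, 1], [1, 2]] are 3 and 1.
    demo_value, demo_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(demo_value - 3.0) <= 1e-6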
| 623 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
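# Alternative sketch (added): the same chain length via memoised recursion.
# `chain_length` is an illustrative helper; the deepest chain below ten
# thousand is only a few hundred calls, so default recursion limits suffice.
from functools import lru_cache


@lru_cache(maxsize=None)
def chain_length(start: int) -> int:
    if start == 1:
        return 1
    return 1 + chain_length(start // 2 if start % 2 == 0 else 3 * start + 1)


assert max(range(1, 10_000), key=chain_length) == 6_171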
if __name__ == "__main__":
print(solution(int(input().strip())))
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 623 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self ):
return len(self.sp_model )
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.preprocess_text(lowerCamelCase__ )
_lowerCamelCase = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
_lowerCamelCase = []
for piece in pieces:
if len(lowerCamelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCamelCase = cur_pieces[1:]
else:
_lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase__ )
else:
new_pieces.append(lowerCamelCase__ )
return new_pieces
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.PieceToId(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.IdToPiece(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1]
return ([0] * len(lowerCamelCase__ )) + [1, 1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
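# Added note: CPM maps " " and "\n" to the placeholder glyphs "\u2582" and
# "\u2583" before SentencePiece, and _decode reverses that mapping. A tiny
# round-trip sketch of just this character mapping (sample text only):
_translator = str.maketrans(" \n", "\u2582\u2583")
_encoded = "你好 世界\n".translate(_translator)
assert _encoded == "你好\u2582世界\u2583"
assert _encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"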
| 700 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
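    # Sanity check (added): every iteration replaces each segment with four, so
    # after n steps the initial 3 segments become 3 * 4**n (plus the closing point).
    assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1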
plot(processed_vectors)
| 623 | 0 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize=True, size=None, size_divisor=32, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_pad=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size_divisor''' ) )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 701 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data fields of the two nodes
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
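    # Added check: swap_nodes exchanges the *data* fields rather than
    # re-linking nodes (O(n) time, O(1) extra space), so an external reference
    # to a Node keeps its position in the list but sees the swapped value.
    first_node = ll.head  # currently holds 4 after the swap above
    ll.swap_nodes(1, 4)  # swap back
    assert first_node is ll.head and first_node.data == 1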
| 623 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 702 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 623 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                result[key] = words[0]
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
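    # Example invocation (added; the script name and all file paths are
    # placeholders, not verified commands):
    #   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./wav2vec_small_960h.pt --dict_path ./dict.ltr.txt \
    #       --pytorch_dump_folder_path ./wav2vec2-base-960h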
| 704 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 | 0 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Optional[Any]:
_lowerCamelCase = fname.split(os.path.sep )[-1]
return re.search(r'''^(.*)_\d+\.jpg$''' , lowercase_ ).groups()[0]
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
_lowerCamelCase = file_names
_lowerCamelCase = image_transform
_lowerCamelCase = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , lowerCamelCase__ ):
_lowerCamelCase = self.file_names[idx]
_lowerCamelCase = PIL.Image.open(lowerCamelCase__ )
_lowerCamelCase = raw_image.convert('''RGB''' )
if self.image_transform is not None:
_lowerCamelCase = self.image_transform(lowerCamelCase__ )
_lowerCamelCase = extract_label(lowerCamelCase__ )
if self.label_to_id is not None:
_lowerCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[Any] ) -> Union[str, Any]:
# Initialize accelerator
if args.with_tracking:
_lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase = config['''lr''']
_lowerCamelCase = int(config['''num_epochs'''] )
_lowerCamelCase = int(config['''seed'''] )
_lowerCamelCase = int(config['''batch_size'''] )
_lowerCamelCase = config['''image_size''']
if not isinstance(lowercase_ , (list, tuple) ):
_lowerCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
_lowerCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_lowerCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCamelCase = os.path.split(lowercase_ )[-1].split('''.''' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
_lowerCamelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_lowerCamelCase = [extract_label(lowercase_ ) for fname in file_names]
_lowerCamelCase = list(set(lowercase_ ) )
id_to_label.sort()
_lowerCamelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
_lowerCamelCase = np.random.permutation(len(lowercase_ ) )
_lowerCamelCase = int(0.8 * len(lowercase_ ) )
_lowerCamelCase = random_perm[:cut]
_lowerCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCamelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
_lowerCamelCase = Compose([Resize(lowercase_ ), ToTensor()] )
_lowerCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase = create_model('''resnet50d''' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCamelCase = False
for param in model.get_classifier().parameters():
_lowerCamelCase = True
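# only the classification head remains trainable; the backbone parameters stay frozen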
# We normalize the batches of images to be a bit faster.
_lowerCamelCase = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_lowerCamelCase = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCamelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
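# the optimizer lr of max_lr / 25 mirrors OneCycleLR's default div_factor of 25: the
# scheduler starts at max_lr / div_factor, anneals up to max_lr, then back down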
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_lowerCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCamelCase = os.path.splitext(lowercase_ )[0]
if "epoch" in training_difference:
_lowerCamelCase = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
_lowerCamelCase = None
else:
_lowerCamelCase = int(training_difference.replace('''step_''' , '''''' ) )
_lowerCamelCase = resume_step // len(lowercase_ )
resume_step -= starting_epoch * len(lowercase_ )
# Now we train the model
for epoch in range(lowercase_ , lowercase_ ):
model.train()
if args.with_tracking:
_lowerCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCamelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
model.eval()
_lowerCamelCase = 0
_lowerCamelCase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
with torch.no_grad():
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = outputs.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_lowerCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
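# gather_for_metrics drops the duplicate samples a distributed sampler may add to the
# last batch, so the running counts stay exact across processes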
_lowerCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {1_00 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(lowercase_ ),
'''epoch''': epoch,
} , step=lowercase_ , )
if checkpointing_steps == "epoch":
_lowerCamelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=lowercase_ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=lowercase_ , default=lowercase_ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=lowercase_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=lowercase_ , default=lowercase_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=lowercase_ , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information''' , )
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 705 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCAmelCase_( lowercase_ : str ) -> str:
if "://" in dataset_path:
_lowerCamelCase = dataset_path.split('''://''' )[1]
return dataset_path
def lowerCAmelCase_( lowercase_ : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
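# note for the check above: fsspec's local filesystem typically reports protocol "file",
# so only genuinely remote filesystems (e.g. protocol "s3") count as remote here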
def lowerCAmelCase_( lowercase_ : fsspec.AbstractFileSystem , lowercase_ : str , lowercase_ : str ) -> Any:
_lowerCamelCase = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) , fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ , lowercase_ , recursive=lowercase_ )
def lowerCAmelCase_( ) -> None:
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = threading.Lock()
| 706 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
| 623 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
'''simple docstring'''
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
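# nb_max_frames equals chunk_length * sampling_rate // hop_length, so padding='max_length'
# always yields the same number of frames regardless of the raw audio length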
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 707 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 0 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Dict ) -> Union[str, Any]:
if isinstance(lowercase_ , torch.Tensor ):
return image
elif isinstance(lowercase_ , PIL.Image.Image ):
_lowerCamelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCamelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
_lowerCamelCase = np.concatenate(lowercase_ , axis=0 )
_lowerCamelCase = np.array(lowercase_ ).astype(np.floataa ) / 2_55.0
_lowerCamelCase = image.transpose(0 , 3 , 1 , 2 )
_lowerCamelCase = 2.0 * image - 1.0
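# after the rescale above, pixel values lie in [-1, 1]: a uint8 value of 255 maps to 1.0 and 0 maps to -1.0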
_lowerCamelCase = torch.from_numpy(lowercase_ )
elif isinstance(image[0] , torch.Tensor ):
_lowerCamelCase = torch.cat(lowercase_ , dim=0 )
return image
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any=0.9_9_9_5 ) -> Optional[Any]:
if not isinstance(lowercase_ , np.ndarray ):
_lowerCamelCase = True
_lowerCamelCase = va.device
_lowerCamelCase = va.cpu().numpy()
_lowerCamelCase = va.cpu().numpy()
_lowerCamelCase = np.sum(va * va / (np.linalg.norm(lowercase_ ) * np.linalg.norm(lowercase_ )) )
if np.abs(lowercase_ ) > DOT_THRESHOLD:
_lowerCamelCase = (1 - t) * va + t * va
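# nearly collinear inputs (|dot| > DOT_THRESHOLD) make the spherical formula numerically
# unstable, so plain linear interpolation is used in this branch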
else:
_lowerCamelCase = np.arccos(lowercase_ )
_lowerCamelCase = np.sin(lowercase_ )
_lowerCamelCase = theta_a * t
_lowerCamelCase = np.sin(lowercase_ )
_lowerCamelCase = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCamelCase = sin_theta_t / sin_theta_a
_lowerCamelCase = sa * va + sa * va
if inputs_are_torch:
_lowerCamelCase = torch.from_numpy(lowercase_ ).to(lowercase_ )
return va
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Dict ) -> str:
_lowerCamelCase = F.normalize(lowercase_ , dim=-1 )
_lowerCamelCase = F.normalize(lowercase_ , dim=-1 )
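# for unit vectors ||x - y|| / 2 == sin(theta / 2), so the expression below equals
# 2 * (theta / 2) ** 2, i.e. half the squared geodesic distance on the unit sphere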
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Union[str, Any] ) -> Optional[int]:
for param in model.parameters():
_lowerCamelCase = value
class lowerCamelCase_( A__ ):
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ):
super().__init__()
self.register_modules(
vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , clip_model=lowerCamelCase__ , tokenizer=lowerCamelCase__ , unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , coca_model=lowerCamelCase__ , coca_tokenizer=lowerCamelCase__ , coca_transform=lowerCamelCase__ , )
_lowerCamelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCamelCase__ )
else feature_extractor.size['''shortest_edge''']
)
_lowerCamelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCamelCase__ )
set_requires_grad(self.clip_model , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase__ )
def snake_case__ ( self ):
self.enable_attention_slicing(lowerCamelCase__ )
def snake_case__ ( self ):
set_requires_grad(self.vae , lowerCamelCase__ )
def snake_case__ ( self ):
set_requires_grad(self.vae , lowerCamelCase__ )
def snake_case__ ( self ):
set_requires_grad(self.unet , lowerCamelCase__ )
def snake_case__ ( self ):
set_requires_grad(self.unet , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# get the original timestep using init_timestep
_lowerCamelCase = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_lowerCamelCase = max(num_inference_steps - init_timestep , 0 )
_lowerCamelCase = self.scheduler.timesteps[t_start:]
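# e.g. with num_inference_steps=50 and strength=0.6, the last 30 of the 50 scheduler timesteps are kept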
return timesteps, num_inference_steps - t_start
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
if not isinstance(lowerCamelCase__ , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase__ )}""" )
_lowerCamelCase = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase__ )
]
_lowerCamelCase = torch.cat(lowerCamelCase__ , dim=0 )
else:
_lowerCamelCase = self.vae.encode(lowerCamelCase__ ).latent_dist.sample(lowerCamelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase = 0.1_8_2_1_5 * init_latents
_lowerCamelCase = init_latents.repeat_interleave(lowerCamelCase__ , dim=0 )
_lowerCamelCase = randn_tensor(init_latents.shape , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
_lowerCamelCase = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
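# the noising timestep comes from `strength` (via get_timesteps): higher strength injects
# more noise, moving the output further from the input image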
_lowerCamelCase = init_latents
return latents
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.coca_transform(lowerCamelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCamelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCamelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.feature_extractor.preprocess(lowerCamelCase__ )
_lowerCamelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCamelCase = self.clip_model.get_image_features(lowerCamelCase__ )
_lowerCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase__ )
_lowerCamelCase = image_embeddings_clip.repeat_interleave(lowerCamelCase__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = latents.detach().requires_grad_()
_lowerCamelCase = self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# predict the noise residual
_lowerCamelCase = self.unet(lowerCamelCase__ , lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCamelCase = self.scheduler.alphas_cumprod[timestep]
_lowerCamelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCamelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCamelCase = torch.sqrt(lowerCamelCase__ )
_lowerCamelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCamelCase__ ):
_lowerCamelCase = self.scheduler.sigmas[index]
_lowerCamelCase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase = 1 / 0.1_8_2_1_5 * sample
_lowerCamelCase = self.vae.decode(lowerCamelCase__ ).sample
_lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = transforms.Resize(self.feature_extractor_size )(lowerCamelCase__ )
_lowerCamelCase = self.normalize(lowerCamelCase__ ).to(latents.dtype )
_lowerCamelCase = self.clip_model.get_image_features(lowerCamelCase__ )
_lowerCamelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase__ )
_lowerCamelCase = spherical_dist_loss(lowerCamelCase__ , lowerCamelCase__ ).mean() * clip_guidance_scale
_lowerCamelCase = -torch.autograd.grad(lowerCamelCase__ , lowerCamelCase__ )[0]
if isinstance(self.scheduler , lowerCamelCase__ ):
_lowerCamelCase = latents.detach() + grads * (sigma**2)
_lowerCamelCase = noise_pred_original
else:
_lowerCamelCase = noise_pred_original - torch.sqrt(lowerCamelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 0.6 , lowerCamelCase__ = 5_0 , lowerCamelCase__ = 7.5 , lowerCamelCase__ = 1 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , lowerCamelCase__ = 0.8 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(lowerCamelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(lowerCamelCase__ , torch.Generator ) and batch_size > 1:
_lowerCamelCase = [generator] + [None] * (batch_size - 1)
_lowerCamelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_lowerCamelCase = [x[0] for x in coca_is_none if x[1]]
_lowerCamelCase = ''', '''.join(lowerCamelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCamelCase = self.get_image_description(lowerCamelCase__ )
if style_prompt is None:
if len(lowerCamelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowerCamelCase = self.get_image_description(lowerCamelCase__ )
# get prompt text embeddings for content and style
_lowerCamelCase = self.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
_lowerCamelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCamelCase = self.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
_lowerCamelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCamelCase = slerp(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# duplicate text embeddings for each generation per prompt
_lowerCamelCase = text_embeddings.repeat_interleave(lowerCamelCase__ , dim=0 )
# set timesteps
_lowerCamelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCamelCase = {}
if accepts_offset:
_lowerCamelCase = 1
self.scheduler.set_timesteps(lowerCamelCase__ , **lowerCamelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCamelCase , _lowerCamelCase = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_lowerCamelCase = timesteps[:1].repeat(lowerCamelCase__ )
# Preprocess image
_lowerCamelCase = preprocess(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.prepare_latents(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , text_embeddings.dtype , self.device , lowerCamelCase__ )
_lowerCamelCase = preprocess(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.prepare_latents(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , text_embeddings.dtype , self.device , lowerCamelCase__ )
_lowerCamelCase = slerp(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if clip_guidance_scale > 0:
_lowerCamelCase = self.get_clip_image_embeddings(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.get_clip_image_embeddings(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = slerp(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCamelCase = content_text_input.input_ids.shape[-1]
_lowerCamelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowerCamelCase__ , return_tensors='''pt''' )
_lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCamelCase = uncond_embeddings.repeat_interleave(lowerCamelCase__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCamelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCamelCase = torch.randn(lowerCamelCase__ , generator=lowerCamelCase__ , device='''cpu''' , dtype=lowerCamelCase__ ).to(
self.device )
else:
_lowerCamelCase = torch.randn(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCamelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCamelCase = {}
if accepts_eta:
_lowerCamelCase = eta
# check if the scheduler accepts generator
_lowerCamelCase = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCamelCase = generator
with self.progress_bar(total=lowerCamelCase__ ):
for i, t in enumerate(lowerCamelCase__ ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase = self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# predict the noise residual
_lowerCamelCase = self.unet(lowerCamelCase__ , lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase = noise_pred.chunk(2 )
_lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
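# the guided prediction pushes away from the unconditional branch and toward the
# text-conditioned one; guidance_scale == 1 reduces to the conditional prediction alone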
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCamelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCamelCase , _lowerCamelCase = self.cond_fn(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase = 1 / 0.1_8_2_1_5 * latents
_lowerCamelCase = self.vae.decode(lowerCamelCase__ ).sample
_lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase__ , nsfw_content_detected=lowerCamelCase__ )
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
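The import-structure dictionary and the `_LazyModule` assignment above defer heavy framework imports until a symbol is first accessed. A minimal sketch of that lazy-import pattern using PEP 562 module-level `__getattr__`, meant for a package `__init__.py`; this is an illustration, not the actual `transformers._LazyModule` implementation:

import importlib

_import_structure = {"tokenization_xlm": ["XLMTokenizer"]}

def __getattr__(name):
    # resolve the submodule that defines `name` and import it on first access
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")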
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = 1.6_0_2_1e-1_9 # units = C
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
        raise ValueError('''You cannot supply more or fewer than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
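The function above inverts the conductivity relation sigma = n * e * mu, solving for whichever of the three quantities is passed as 0. A quick numeric check of the relation itself, with illustrative material values:

ELECTRON_CHARGE = 1.6021e-19  # C
electron_conc = 1e22          # carriers per m^3 (illustrative)
mobility = 0.14               # m^2 / (V*s) (illustrative)

conductivity = electron_conc * ELECTRON_CHARGE * mobility
print(conductivity)  # ~224.3 S/m; passing conductivity=0 above would recover this value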
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = SwinConfig(image_size=1_92 )
if "base" in model_name:
_lowerCamelCase = 6
_lowerCamelCase = 1_28
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (4, 8, 16, 32)
elif "large" in model_name:
_lowerCamelCase = 12
_lowerCamelCase = 1_92
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
_lowerCamelCase = window_size
_lowerCamelCase = embed_dim
_lowerCamelCase = depths
_lowerCamelCase = num_heads
return config
def lowerCAmelCase_( lowercase_ : Dict ) -> List[str]:
if "encoder.mask_token" in name:
_lowerCamelCase = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
_lowerCamelCase = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
_lowerCamelCase = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''layernorm.bias'''
if "decoder" in name:
pass
else:
_lowerCamelCase = '''swin.''' + name
return name
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] ) -> Dict:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(lowercase_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[2] )
_lowerCamelCase = int(key_split[4] )
_lowerCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[
dim : dim * 2, :
]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[
:dim
]
_lowerCamelCase = val[
dim : dim * 2
]
_lowerCamelCase = val[
-dim:
]
else:
_lowerCamelCase = val
return orig_state_dict
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[int] ) -> Union[str, Any]:
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )['''model''']
_lowerCamelCase = get_swin_config(lowercase_ )
_lowerCamelCase = SwinForMaskedImageModeling(lowercase_ )
model.eval()
_lowerCamelCase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} )
_lowerCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
_lowerCamelCase = image_processor(images=lowercase_ , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase = model(**lowercase_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 710 |
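The `convert_state_dict` function above splits each fused qkv projection by slicing along the first axis: the query rows come first, then key, then value. A small sketch of that split with an illustrative hidden size:

import torch

dim = 4  # illustrative all_head_size
fused_qkv_weight = torch.randn(3 * dim, dim)  # rows stacked as [query; key; value]

query_w = fused_qkv_weight[:dim, :]
key_w = fused_qkv_weight[dim : dim * 2, :]
value_w = fused_qkv_weight[-dim:, :]
assert query_w.shape == key_w.shape == value_w.shape == (dim, dim)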
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> bool:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCamelCase = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCamelCase = True
if a[i].islower():
_lowerCamelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
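The DP above (shown with obfuscated assignments) decides whether `b` can be obtained from `a` by upper-casing some lowercase letters and deleting the remaining lowercase ones. A readable restatement of the same recurrence with hypothetical names:

def is_abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # consume a[i] as an (upper-cased) match
                if a[i].islower():
                    dp[i + 1][j] = True      # delete the lowercase a[i]
    return dp[n][m]

print(is_abbreviation("daBcd", "ABC"))  # True
print(is_abbreviation("dBcd", "ABC"))   # False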
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
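Both copies above implement the sigmoid-based GELU approximation gelu(x) ≈ x * sigmoid(1.702 * x). A quick numeric check, with illustrative inputs:

import numpy as np

def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))

x = np.array([-1.0, 0.0, 1.0])
print(x * sigmoid(1.702 * x))  # approx [-0.1542, 0.0, 0.8458]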
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Any = 'fnet'
def __init__( self , lowerCamelCase__=3_2_0_0_0 , lowerCamelCase__=7_6_8 , lowerCamelCase__=1_2 , lowerCamelCase__=3_0_7_2 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=4 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-12 , lowerCamelCase__=False , lowerCamelCase__=5_1_2 , lowerCamelCase__=3 , lowerCamelCase__=1 , lowerCamelCase__=2 , **lowerCamelCase__ , ):
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = initializer_range
_lowerCamelCase = type_vocab_size
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = use_tpu_fourier_optimizations
_lowerCamelCase = tpu_short_seq_length
| 712 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None ):
# Input as list
_lowerCamelCase = list(poly_a or [0] )[:]
_lowerCamelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_lowerCamelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_lowerCamelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_lowerCamelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_lowerCamelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_lowerCamelCase = self.__multiply()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(lowerCamelCase__ ) <= 1:
return dft[0]
#
_lowerCamelCase = self.c_max_length // 2
while next_ncol > 0:
_lowerCamelCase = [[] for i in range(lowerCamelCase__ )]
_lowerCamelCase = self.root**next_ncol
# First half of next step
_lowerCamelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCamelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_lowerCamelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(lowerCamelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_lowerCamelCase = new_dft
_lowerCamelCase = next_ncol // 2
return dft[0]
def snake_case__ ( self ):
_lowerCamelCase = self.__dft('''A''' )
_lowerCamelCase = self.__dft('''B''' )
_lowerCamelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_lowerCamelCase = 2
while next_ncol <= self.c_max_length:
_lowerCamelCase = [[] for i in range(lowerCamelCase__ )]
_lowerCamelCase = self.root ** (next_ncol // 2)
_lowerCamelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_lowerCamelCase = new_inverse_c
next_ncol *= 2
# Unpack
_lowerCamelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
        _lowerCamelCase = '''A = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for i, coef in enumerate(self.polyA[: self.len_A] ) )
        _lowerCamelCase = '''B = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for i, coef in enumerate(self.polyB[: self.len_B] ) )
        _lowerCamelCase = '''A*B = ''' + ''' + '''.join(
            F"""{coef}*x^{i}""" for i, coef in enumerate(self.product ) )
return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 713 |
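In its upstream source the class above is named `FFT`; assuming that name, multiplying A = 1 + 2x + 3x^2 by B = 4 + 5x + 6x^2 should yield 4 + 13x + 28x^2 + 27x^3 + 18x^4:

fft = FFT([1, 2, 3], [4, 5, 6])  # assumes the upstream class name FFT
print(fft.product)               # coefficients [4, 13, 28, 27, 18], up to float/complex rounding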
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
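The function above rearranges the series-RL relation Z = sqrt(R^2 + X^2). A one-line sanity check with the classic 3-4-5 triple:

from math import sqrt

resistance, reactance = 3.0, 4.0
print(sqrt(resistance**2 + reactance**2))  # 5.0, so passing impedance=0 above would return 5.0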
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
__SCREAMING_SNAKE_CASE : List[str] = TypeVar('''U''')
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = key
_lowerCamelCase = val
_lowerCamelCase = None
_lowerCamelCase = None
def __repr__( self ):
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = self.rear, self.head
def __repr__( self ):
_lowerCamelCase = ['''DoubleLinkedList''']
_lowerCamelCase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase__ ) )
_lowerCamelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCamelCase = node
_lowerCamelCase = previous
_lowerCamelCase = node
_lowerCamelCase = self.rear
def snake_case__ ( self , lowerCamelCase__ ):
if node.prev is None or node.next is None:
return None
_lowerCamelCase = node.next
_lowerCamelCase = node.prev
_lowerCamelCase = None
_lowerCamelCase = None
return node
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
lowercase__ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = DoubleLinkedList()
_lowerCamelCase = capacity
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = {}
def __repr__( self ):
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , lowerCamelCase__ ):
return key in self.cache
def snake_case__ ( self , lowerCamelCase__ ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_lowerCamelCase = self.cache[key]
_lowerCamelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase__ )
return node.val
self.miss += 1
return None
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowerCamelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase__ ) is not None
                ) # node guaranteed to be in list; node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowerCamelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_lowerCamelCase = value
self.list.add(lowerCamelCase__ )
@classmethod
def snake_case__ ( cls , lowerCamelCase__ = 1_2_8 ):
def cache_decorator_inner(lowerCamelCase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowerCamelCase = LRUCache(lowerCamelCase__ )
_lowerCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_lowerCamelCase = func(*lowerCamelCase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase__ , '''cache_info''' , lowerCamelCase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
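A usage sketch for the class-method decorator above, assuming the class keeps its upstream name `LRUCache` (here it appears under an obfuscated name). The decorator memoizes on the first positional argument:

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num < 2:
        return num
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoized recursion
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)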
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 623 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def lowerCAmelCase_( lowercase_ : Tuple ) -> Any:
_lowerCamelCase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_lowerCamelCase = 1_28
elif "12-12" in model_name:
_lowerCamelCase = 12
_lowerCamelCase = 12
elif "14-14" in model_name:
_lowerCamelCase = 14
_lowerCamelCase = 14
elif "16-16" in model_name:
_lowerCamelCase = 16
_lowerCamelCase = 16
else:
raise ValueError('''Model not supported''' )
_lowerCamelCase = '''huggingface/label-files'''
if "speech-commands" in model_name:
_lowerCamelCase = 35
_lowerCamelCase = '''speech-commands-v2-id2label.json'''
else:
_lowerCamelCase = 5_27
_lowerCamelCase = '''audioset-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> Tuple:
if "module.v" in name:
_lowerCamelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
_lowerCamelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
_lowerCamelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
_lowerCamelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
_lowerCamelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_lowerCamelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
_lowerCamelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
_lowerCamelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Any ) -> str:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[3] )
_lowerCamelCase = config.hidden_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[dim : dim * 2, :]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
else:
_lowerCamelCase = val
return orig_state_dict
def lowerCAmelCase_( lowercase_ : Any ) -> Tuple:
_lowerCamelCase = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Any , lowercase_ : Any=False ) -> Optional[Any]:
_lowerCamelCase = get_audio_spectrogram_transformer_config(lowercase_ )
_lowerCamelCase = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
_lowerCamelCase = model_name_to_url[model_name]
_lowerCamelCase = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )
# remove some keys
remove_keys(lowercase_ )
# rename some keys
_lowerCamelCase = convert_state_dict(lowercase_ , lowercase_ )
# load 🤗 model
_lowerCamelCase = ASTForAudioClassification(lowercase_ )
model.eval()
model.load_state_dict(lowercase_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_lowerCamelCase = -4.2_6_7_7_3_9_3 if '''speech-commands''' not in model_name else -6.8_4_5_9_7_8
_lowerCamelCase = 4.5_6_8_9_9_7_4 if '''speech-commands''' not in model_name else 5.5_6_5_4_5_2_6
_lowerCamelCase = 10_24 if '''speech-commands''' not in model_name else 1_28
_lowerCamelCase = ASTFeatureExtractor(mean=lowercase_ , std=lowercase_ , max_length=lowercase_ )
if "speech-commands" in model_name:
_lowerCamelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
_lowerCamelCase = dataset[0]['''audio''']['''array''']
else:
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
_lowerCamelCase = waveform.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowercase_ , sampling_rate=1_60_00 , return_tensors='''pt''' )
# forward pass
_lowerCamelCase = model(**lowercase_ )
_lowerCamelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_lowerCamelCase = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_lowerCamelCase = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_lowerCamelCase = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_lowerCamelCase = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_lowerCamelCase = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_lowerCamelCase = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
_lowerCamelCase = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 715 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 0 |
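The shim above works because the `warnings.warn(...)` call sits directly in the class body, so it fires once when the old module path is imported. A minimal sketch of the same pattern with hypothetical names:

import warnings

class _NewLocationMixin:  # stand-in for the relocated class
    pass

class OldLocationMixin(_NewLocationMixin):
    warnings.warn(
        "Importing OldLocationMixin from its old module is deprecated; "
        "import it from the new module instead.",
        FutureWarning,
    )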
"""simple docstring"""
import os
import sys
import unittest
__SCREAMING_SNAKE_CASE : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__SCREAMING_SNAKE_CASE : List[str] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ )
_lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ )
_lowerCamelCase = {'''BertModelTest''': '''BertModelTester'''}
_lowerCamelCase = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ )
_lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ )
_lowerCamelCase = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_lowerCamelCase = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ )
_lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ )
_lowerCamelCase = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_lowerCamelCase = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
| 716 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 0 |