code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A_ :
_UpperCAmelCase : Optional[Any] = MBartConfig
_UpperCAmelCase : str = {}
_UpperCAmelCase : str = 'gelu'
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str]=1_3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : Tuple=9_9 ,SCREAMING_SNAKE_CASE__ : Dict=3_2 ,SCREAMING_SNAKE_CASE__ : List[Any]=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4 ,SCREAMING_SNAKE_CASE__ : str=3_7 ,SCREAMING_SNAKE_CASE__ : Dict=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2_0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=2 ,SCREAMING_SNAKE_CASE__ : int=1 ,SCREAMING_SNAKE_CASE__ : List[str]=0 ,):
__lowerCamelCase : List[str] = parent
__lowerCamelCase : int = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : Dict = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[Any] = eos_token_id
__lowerCamelCase : List[str] = pad_token_id
__lowerCamelCase : int = bos_token_id
def lowerCAmelCase ( self : int):
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size)
__lowerCamelCase : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) ,1)
__lowerCamelCase : int = tf.concat([input_ids, eos_tensor] ,axis=1)
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
__lowerCamelCase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
__lowerCamelCase : Union[str, Any] = prepare_mbart_inputs_dict(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase)
return config, inputs_dict
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Any = TFMBartModel(config=_lowerCAmelCase).get_decoder()
__lowerCamelCase : Optional[int] = inputs_dict['input_ids']
__lowerCamelCase : List[str] = input_ids[:1, :]
__lowerCamelCase : List[str] = inputs_dict['attention_mask'][:1, :]
__lowerCamelCase : Any = inputs_dict['head_mask']
__lowerCamelCase : Tuple = 1
# first forward pass
__lowerCamelCase : Any = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,head_mask=_lowerCAmelCase ,use_cache=_lowerCAmelCase)
__lowerCamelCase , __lowerCamelCase : str = outputs.to_tuple()
__lowerCamelCase : Any = past_key_values[1]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Union[str, Any]:
if attention_mask is None:
__lowerCamelCase : List[Any] = tf.cast(tf.math.not_equal(lowerCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowerCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowerCamelCase : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCamelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCamelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
_UpperCAmelCase : List[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Any = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : Tuple = False
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[int] = TFMBartModelTester(self)
__lowerCamelCase : Optional[Any] = ConfigTester(self ,config_class=_lowerCAmelCase)
def lowerCAmelCase ( self : List[str]):
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class A_ ( unittest.TestCase ):
_UpperCAmelCase : Union[str, Any] = [
' UN Chief Says There Is No Military Solution in Syria',
]
_UpperCAmelCase : Optional[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
_UpperCAmelCase : List[Any] = 'facebook/mbart-large-en-ro'
@cached_property
def lowerCAmelCase ( self : List[Any]):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def lowerCAmelCase ( self : str ,**SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : Optional[int] = self.translate_src_text(**_lowerCAmelCase)
self.assertListEqual(self.expected_text ,_lowerCAmelCase)
def lowerCAmelCase ( self : Dict ,**SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : List[str] = self.tokenizer(self.src_text ,**_lowerCAmelCase ,return_tensors='tf')
__lowerCamelCase : List[str] = self.model.generate(
model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2)
__lowerCamelCase : List[Any] = self.tokenizer.batch_decode(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase)
return generated_words
@slow
def lowerCAmelCase ( self : List[str]):
self._assert_generated_batch_equal_expected()
| 652 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __UpperCamelCase ( _lowerCAmelCase ):
# to overwrite at feature extractactor specific tests
__snake_case :Optional[int] = None
__snake_case :Dict = None
@property
def _a ( self : str ) -> List[str]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """padding_value""" ) )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _a ( self : str , _lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : int ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = self.feat_extract_tester.seq_length_diff
__lowercase = self.feat_extract_tester.max_seq_length + pad_diff
__lowercase = self.feat_extract_tester.min_seq_length
__lowercase = self.feat_extract_tester.batch_size
__lowercase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
__lowercase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" )[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _a ( self : Tuple , _lowerCAmelCase : str=False ) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : Tuple ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Any , _lowerCAmelCase : str ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowercase = 12
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
__lowercase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowercase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
@require_torch
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(_lowerCAmelCase )
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 80 | 0 |
"""simple docstring"""
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
if height >= 1:
move_tower(height - 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
move_disk(lowerCAmelCase_ , lowerCAmelCase_ )
move_tower(height - 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
print('''moving disk from''' , lowerCAmelCase_ , '''to''' , lowerCAmelCase_ )
def snake_case ( ) -> int:
_snake_case = int(input('''Height of hanoi: ''' ).strip() )
move_tower(lowerCAmelCase_ , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
| 103 |
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [[] for _ in range(lowerCamelCase )]
__lowercase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(lowerCamelCase ) <= key:
return input_string
for position, character in enumerate(lowerCamelCase ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCamelCase )
__lowercase = ["""""".join(lowerCamelCase ) for row in temp_grid]
__lowercase = """""".join(lowerCamelCase )
return output_string
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = []
__lowercase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
__lowercase = [[] for _ in range(lowerCamelCase )] # generates template
for position in range(len(lowerCamelCase ) ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
__lowercase = 0
for row in temp_grid: # fills in the characters
__lowercase = input_string[counter : counter + len(lowerCamelCase )]
grid.append(list(lowerCamelCase ) )
counter += len(lowerCamelCase )
__lowercase = """""" # reads as zigzag
for position in range(len(lowerCamelCase ) ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = {}
for key_guess in range(1 , len(lowerCamelCase ) ): # tries every key
__lowercase = decrypt(lowerCamelCase , lowerCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCAmelCase_ = {"""facebook/blenderbot_small-90M""": 512}
def __magic_name__ ( lowercase ) -> str:
"""simple docstring"""
lowercase_ : Optional[Any] = set()
lowercase_ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : Optional[int] = char
lowercase_ : int = set(lowercase )
return pairs
class UpperCamelCase__ ( _lowerCAmelCase ):
'''simple docstring'''
__a : List[Any] = VOCAB_FILES_NAMES
__a : Tuple = PRETRAINED_VOCAB_FILES_MAP
__a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : str = ['input_ids', 'attention_mask']
def __init__( self, snake_case__, snake_case__, snake_case__="__start__", snake_case__="__end__", snake_case__="__unk__", snake_case__="__null__", **snake_case__, ) -> str:
"""simple docstring"""
super().__init__(unk_token=_lowerCAmelCase, bos_token=_lowerCAmelCase, eos_token=_lowerCAmelCase, pad_token=_lowerCAmelCase, **_lowerCAmelCase )
with open(_lowerCAmelCase, encoding="""utf-8""" ) as vocab_handle:
lowercase_ : List[str] = json.load(_lowerCAmelCase )
lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase, encoding="""utf-8""" ) as merges_handle:
lowercase_ : Dict = merges_handle.read().split("""\n""" )[1:-1]
lowercase_ : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
lowercase_ : Optional[Any] = dict(zip(_lowerCAmelCase, range(len(_lowerCAmelCase ) ) ) )
lowercase_ : Optional[Any] = {}
@property
def snake_case__ ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case__ ( self, snake_case__ ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase_ : str = re.sub("""([.,!?()])""", r""" \1""", _lowerCAmelCase )
lowercase_ : str = re.sub("""(')""", r""" \1 """, _lowerCAmelCase )
lowercase_ : List[Any] = re.sub(r"""\s{2,}""", """ """, _lowerCAmelCase )
if "\n" in token:
lowercase_ : Tuple = token.replace("""\n""", """ __newln__""" )
lowercase_ : int = token.split(""" """ )
lowercase_ : int = []
for token in tokens:
if not len(_lowerCAmelCase ):
continue
lowercase_ : List[str] = token.lower()
lowercase_ : List[str] = tuple(_lowerCAmelCase )
lowercase_ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
lowercase_ : Optional[int] = get_pairs(_lowerCAmelCase )
if not pairs:
words.append(_lowerCAmelCase )
continue
while True:
lowercase_ : List[str] = min(_lowerCAmelCase, key=lambda snake_case__ : self.bpe_ranks.get(_lowerCAmelCase, float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : List[Any] = bigram
lowercase_ : List[Any] = []
lowercase_ : Any = 0
while i < len(_lowerCAmelCase ):
try:
lowercase_ : List[Any] = word.index(_lowerCAmelCase, _lowerCAmelCase )
new_word.extend(word[i:j] )
lowercase_ : int = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Optional[Any] = tuple(_lowerCAmelCase )
lowercase_ : Optional[Any] = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
lowercase_ : int = get_pairs(_lowerCAmelCase )
lowercase_ : Optional[Any] = """@@ """.join(_lowerCAmelCase )
lowercase_ : List[Any] = word[:-4]
lowercase_ : Dict = word
words.append(_lowerCAmelCase )
return " ".join(_lowerCAmelCase )
def snake_case__ ( self, snake_case__ ) -> List[str]:
"""simple docstring"""
lowercase_ : Union[str, Any] = []
lowercase_ : Union[str, Any] = re.findall(r"""\S+\n?""", _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def snake_case__ ( self, snake_case__ ) -> int:
"""simple docstring"""
lowercase_ : int = token.lower()
return self.encoder.get(_lowerCAmelCase, self.encoder.get(self.unk_token ) )
def snake_case__ ( self, snake_case__ ) -> str:
"""simple docstring"""
return self.decoder.get(_lowerCAmelCase, self.unk_token )
def snake_case__ ( self, snake_case__ ) -> str:
"""simple docstring"""
lowercase_ : List[Any] = """ """.join(_lowerCAmelCase ).replace("""@@ """, """""" ).strip()
return out_string
def snake_case__( self, save_directory, filename_prefix = None ) -> Tuple[str]:
    """Write the vocabulary (JSON) and BPE merge files into `save_directory`.

    Returns the two file paths, or implicitly ``None`` after logging an
    error when `save_directory` is not an existing directory.

    Fixes: both parameters were declared with the same name
    ``snake_case__`` (a SyntaxError) — the intended names are recovered
    from the f-string / conditional references in the body; the sort key
    lambda took ``snake_case__`` but referenced ``kv`` (NameError); the
    undefined ``vocab_file``/``merge_file``/``index`` names are restored;
    dataset residue fused onto the final line is removed.
    """
    if not os.path.isdir(save_directory ):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
    merge_file = os.path.join(
        save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
    with open(vocab_file, """w""", encoding="""utf-8""" ) as f:
        # sort_keys/ensure_ascii were undefined placeholders in the original;
        # True/False give a stable, UTF-8 friendly dump — TODO confirm against
        # the tokenizer this was derived from.
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + """\n""" )
    index = 0
    with open(merge_file, """w""", encoding="""utf-8""" ) as writer:
        writer.write("""#version: 0.2\n""" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1] ):
            if index != token_index:
                logger.warning(
                    f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                    """ Please check that the tokenizer is not corrupted!""" )
                index = token_index
            writer.write(""" """.join(bpe_tokens ) + """\n""" )
            index += 1
    return vocab_file, merge_file
def snake_case ( lowerCamelCase = 2_000_000 ):
    """Return the sum of all primes strictly below `lowerCamelCase`.

    Uses a sieve of Eratosthenes where ``primality_list[i] == 0`` marks
    ``i`` as prime (0 and 1 are pre-marked composite).

    Fixes: the body read the undefined names ``n``/``primality_list``/
    ``sum_of_primes`` (all assignments went to a throwaway local), and the
    inner sieve range stepped by the limit instead of the prime ``i``.
    Also guards against limits below 2, which previously raised IndexError.
    """
    n = lowerCamelCase
    if n < 2:
        return 0
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            # Mark multiples of the prime i, stepping by i (not by n).
            for j in range(i * i, n + 1, i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(lowerCamelCase ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    # The original printed `solution()`, which does not exist in this module.
    print(F'''{snake_case() = }''')
| 80 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
# Module-level logger shared by the GLUE dataset classes below.
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase :
    """Training arguments for the GLUE tasks (task name, data dir, etc.).

    NOTE(review): every attribute below is bound to the same name
    ``UpperCAmelCase__`` and carries no type annotation, so ``@dataclass``
    registers no fields at all — each ``field(...)`` simply overwrites the
    previous one. The help strings suggest the intended fields were
    task_name, data_dir, max_seq_length and overwrite_cache; confirm
    against the original source before relying on this class.
    """

    UpperCAmelCase__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    UpperCAmelCase__ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    UpperCAmelCase__ = field(
        default=1_28, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    # NOTE(review): `_lowerCAmelCase` is undefined at this point — as written
    # this raises NameError at import time; the default was presumably False.
    UpperCAmelCase__ = field(
        default=_lowerCAmelCase, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )

    def A_ ( self : List[str] ) -> Optional[int]:
        # Lower-cases the task name, but the result is bound to a throwaway
        # local, so self.task_name itself is NOT updated — this looks like a
        # scrambled __post_init__; confirm intent.
        lowerCamelCase__ : Tuple = self.task_name.lower()
class lowerCAmelCase ( _lowerCAmelCase ):
    """Dataset split selector with the values 'train', 'dev' and 'test'.

    NOTE(review): the base class is the undefined placeholder
    ``_lowerCAmelCase`` (presumably ``Enum``), and all three constants are
    bound to the same name ``UpperCAmelCase__`` so only 'test' survives —
    confirm against the original source.
    """

    UpperCAmelCase__ = 'train'
    UpperCAmelCase__ = 'dev'
    UpperCAmelCase__ = 'test'
class lowerCAmelCase ( _lowerCAmelCase ):
    """Deprecated GLUE dataset backed by cached ``InputFeatures``.

    NOTE(review): the base class is the undefined placeholder
    ``_lowerCAmelCase`` (presumably ``Dataset``), and throughout the body
    most locals are bound to throwaway ``lowerCamelCase__`` names while
    later lines read the originally intended identifiers (``mode``,
    ``start``, ``cached_features_file``, ``label_list``, ``examples``, ...).
    The code is left untouched here; it needs the original names restored
    before it can run.
    """

    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42
    UpperCAmelCase__ = 42

    # NOTE(review): all five extra parameters below share the single name
    # ``UpperCAmelCase`` — a duplicate-argument SyntaxError. The annotations
    # show the intended signature:
    # (args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None).
    def __init__( self : int , UpperCAmelCase : GlueDataTrainingArguments , UpperCAmelCase : PreTrainedTokenizerBase , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Union[str, Split] = Split.train , UpperCAmelCase : Optional[str] = None , ) -> str:
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _lowerCAmelCase , )
        lowerCamelCase__ : int = args
        lowerCamelCase__ : Dict = glue_processors[args.task_name]()
        lowerCamelCase__ : List[str] = glue_output_modes[args.task_name]
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            try:
                lowerCamelCase__ : Any = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        # Load data features from cache or dataset file
        lowerCamelCase__ : Tuple = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        lowerCamelCase__ : Union[str, Any] = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCamelCase__ , lowerCamelCase__ : List[str] = label_list[2], label_list[1]
        lowerCamelCase__ : List[str] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase__ : Dict = cached_features_file + '.lock'
        with FileLock(_lowerCAmelCase ):
            if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
                lowerCamelCase__ : List[str] = time.time()
                lowerCamelCase__ : Tuple = torch.load(_lowerCAmelCase )
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    lowerCamelCase__ : Any = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    lowerCamelCase__ : Union[str, Any] = self.processor.get_test_examples(args.data_dir )
                else:
                    lowerCamelCase__ : Tuple = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    lowerCamelCase__ : Optional[Any] = examples[:limit_length]
                lowerCamelCase__ : Union[str, Any] = glue_convert_examples_to_features(
                    _lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
                lowerCamelCase__ : Optional[Any] = time.time()
                torch.save(self.features , _lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )

    def __len__( self : Any ) -> Optional[Any]:
        # Number of cached feature rows.
        return len(self.features )

    def __getitem__( self : Any , UpperCAmelCase : Optional[Any] ) -> InputFeatures:
        # NOTE(review): `i` is undefined — the parameter is named UpperCAmelCase.
        return self.features[i]

    def A_ ( self : int ) -> Optional[Any]:
        # Returns the label list resolved in __init__ (reads self.label_list).
        return self.label_list
| 295 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs and inputs shared by the tests below.

    Restored from scrambled placeholders: the constructor previously declared
    every parameter as ``_lowerCAmelCase`` (a duplicate-argument SyntaxError)
    while the body read the intended names; the class and method names are
    restored to the ones the test classes actually call
    (``MaskFormerSwinModelTester``, ``prepare_config_and_inputs``, ``get_config``,
    ``create_and_check_model``, ``create_and_check_backbone``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],  # mutable defaults kept for interface compatibility
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a MaskFormerSwinConfig mirroring this tester's settings."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): kwarg spelled "path_norm" in the original — kept as-is
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward through MaskFormerSwinModel and check the output shape."""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward through MaskFormerSwinBackbone; verify maps, channels, errors."""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify that an invalid out_features value is rejected
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common test-suite for MaskFormerSwin model/backbone.

    NOTE(review): the two mixin base classes are the undefined placeholder
    ``_lowerCAmelCase`` — presumably ModelTesterMixin and PipelineTesterMixin
    (both imported at the top of the file); confirm and restore. Every test
    method below is named ``_a``, so inside the class body later definitions
    shadow earlier ones; most locals are bound to throwaway ``__lowercase``
    names while later lines read the originally intended identifiers. The
    code is left untouched here.
    """

    __snake_case :Any = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    __snake_case :Optional[int] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    __snake_case :Optional[int] = False
    __snake_case :Any = False
    __snake_case :List[str] = False
    __snake_case :Tuple = False
    __snake_case :Optional[int] = False

    def _a ( self : Tuple ) -> Optional[int]:
        """Build the shared model tester and config tester (setUp-style hook)."""
        __lowercase = MaskFormerSwinModelTester(self )
        __lowercase = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _a ( self : List[str] ) -> List[str]:
        """Skipped: incompatible with nn.DataParallel (see skip reason)."""
        pass

    def _a ( self : Dict ) -> Optional[int]:
        """Run the standard ConfigTester sanity checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _a ( self : List[Any] ) -> Any:
        """Intentional no-op (create_and_test_config_common_properties stub)."""
        return

    def _a ( self : Any ) -> Tuple:
        """Exercise the model forward pass via the model tester."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def _a ( self : Optional[int] ) -> str:
        """Exercise the backbone forward pass via the model tester."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_lowerCAmelCase )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _a ( self : Tuple ) -> Any:
        """Skipped (see decorator)."""
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _a ( self : Tuple ) -> str:
        """Skipped (see decorator)."""
        pass

    def _a ( self : List[Any] ) -> Optional[int]:
        """Check input-embedding / output-embedding accessors on each model class."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )

    def _a ( self : Dict ) -> Dict:
        """Check the forward signature starts with `pixel_values`."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _a ( self : Optional[int] ) -> int:
        """Skipped (see decorator)."""
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _a ( self : Any ) -> Any:
        """Skipped (see decorator)."""
        pass

    # NOTE(review): the four extra parameters below all share the name
    # ``_lowerCAmelCase`` — a duplicate-argument SyntaxError; the body reads
    # the intended names (config, inputs_dict, model_class, image_size).
    def _a ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any ) -> Dict:
        """Forward a model and verify the shape/count of its hidden states."""
        __lowercase = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        with torch.no_grad():
            __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
        __lowercase = outputs.hidden_states
        __lowercase = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
        # Swin has a different seq_length
        __lowercase = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def _a ( self : str ) -> Optional[Any]:
        """Run the hidden-states check at the default image size."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __lowercase = True
            self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    def _a ( self : Dict ) -> Tuple:
        """Run the hidden-states check with an image size padded to the patch grid."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = 3
        __lowercase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __lowercase = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __lowercase = True
            self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _a ( self : Tuple ) -> Any:
        """Skipped (see decorator)."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _a ( self : Any ) -> str:
        """Skipped (see decorator)."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _a ( self : Union[str, Any] ) -> List[Any]:
        """Skipped (see decorator)."""
        pass

    def _a ( self : Any ) -> Union[str, Any]:
        """Check that tuple and dict model outputs are numerically equivalent."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(_lowerCAmelCase : Optional[int] ):
            # NOTE(review): binds 0 to a throwaway local and returns the
            # undefined `t` — the NaN-zeroing logic was scrambled away.
            __lowercase = 0
            return t

        # NOTE(review): duplicate `_lowerCAmelCase` parameters (SyntaxError);
        # intended: (model, tuple_inputs, dict_inputs, additional_kwargs={}).
        def check_equivalence(_lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int]={} ):
            with torch.no_grad():
                __lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
                __lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()

            def recursive_check(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ):
                if isinstance(_lowerCAmelCase , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
                        recursive_check(_lowerCAmelCase , _lowerCAmelCase )
                elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(_lowerCAmelCase , _lowerCAmelCase )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1e-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                            F' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}. Dict has'
                            F' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'
                        ) , )

            recursive_check(_lowerCAmelCase , _lowerCAmelCase )

        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
            check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
            check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
            check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
            check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
    """Backbone-specific checks for MaskFormerSwinBackbone.

    NOTE(review): the second base class is the undefined placeholder
    ``_lowerCAmelCase`` — presumably BackboneTesterMixin (imported above);
    locals are bound to throwaway ``__lowercase`` names while later lines
    read the intended identifiers (``inputs_dict``, ``backbone``,
    ``outputs``, ``batch_size``). Code left untouched.
    """

    __snake_case :Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    __snake_case :Dict = MaskFormerSwinConfig

    def _a ( self : Union[str, Any] ) -> List[str]:
        """setUp-style hook: build the shared model tester."""
        __lowercase = MaskFormerSwinModelTester(self )

    def _a ( self : List[Any] ) -> Dict:
        """Smoke-test backbone outputs: feature maps, hidden states, attentions."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            __lowercase = backbone_class(_lowerCAmelCase )
            backbone.to(_lowerCAmelCase )
            backbone.eval()
            __lowercase = backbone(**_lowerCAmelCase )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , _lowerCAmelCase )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            __lowercase = backbone(**_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    __lowercase , __lowercase , __lowercase = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                __lowercase = backbone(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
                self.assertIsNotNone(outputs.attentions )
| 80 | 0 |
from maths.prime_check import is_prime
def __UpperCamelCase ( _lowerCAmelCase ) -> Any:
    """Return the "twin" of a prime: number + 2 when both `number` and
    `number + 2` are prime, otherwise -1.

    Raises:
        TypeError: if the input is not an integer.

    Fixes: the type check was ``isinstance(x, x)`` (always a runtime
    TypeError), the body read the undefined ``number``, and the error
    message was bound to a throwaway local while the raw input was raised.
    """
    number = _lowerCAmelCase
    if not isinstance(number, int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 662 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
    def _a ( self : List[str] ) -> None:
        """Accelerator-prepared SGD optimizers must survive a pickle round-trip.

        Fixes: the original bound every object to the throwaway ``__lowercase``
        and then passed/pickled the undefined ``_lowerCAmelCase``; locals are
        restored from the names the body actually reads (``model.parameters()``,
        ``accelerator.prepare``).
        """
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'Accelerated optimizer pickling failed with {e}' )
        # Reset the global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> list of public names.
snake_case__ = {
    """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
    """tokenization_perceiver""": ["""PerceiverTokenizer"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # These entries previously rebound the structure variable itself,
    # clobbering it; they belong inside it as submodule entries.
    snake_case__["""feature_extraction_perceiver"""] = ["""PerceiverFeatureExtractor"""]
    snake_case__["""image_processing_perceiver"""] = ["""PerceiverImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case__["""modeling_perceiver"""] = [
        """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PerceiverForImageClassificationConvProcessing""",
        """PerceiverForImageClassificationFourier""",
        """PerceiverForImageClassificationLearned""",
        """PerceiverForMaskedLM""",
        """PerceiverForMultimodalAutoencoding""",
        """PerceiverForOpticalFlow""",
        """PerceiverForSequenceClassification""",
        """PerceiverLayer""",
        """PerceiverModel""",
        """PerceiverPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )
else:
    import sys

    # Register the lazy module. The original passed the undefined
    # `_import_structure` (NameError at import) and bound the result to an
    # unused variable instead of sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], snake_case__, module_spec=__spec__)
| 402 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps submodule name -> list of public names.
__UpperCamelCase = {
    """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
    """configuration_data2vec_text""": [
        """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecTextConfig""",
        """Data2VecTextOnnxConfig""",
    ],
    """configuration_data2vec_vision""": [
        """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecVisionConfig""",
        """Data2VecVisionOnnxConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # These lists previously rebound the structure variable itself; they
    # belong inside it as submodule entries. NOTE(review): the key names use
    # "data2vec" per the configuration keys above, while the TYPE_CHECKING
    # imports below reference "dataavec" module names — confirm the actual
    # module filenames.
    __UpperCamelCase["""modeling_data2vec_audio"""] = [
        """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecAudioForAudioFrameClassification""",
        """Data2VecAudioForCTC""",
        """Data2VecAudioForSequenceClassification""",
        """Data2VecAudioForXVector""",
        """Data2VecAudioModel""",
        """Data2VecAudioPreTrainedModel""",
    ]
    __UpperCamelCase["""modeling_data2vec_text"""] = [
        """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecTextForCausalLM""",
        """Data2VecTextForMaskedLM""",
        """Data2VecTextForMultipleChoice""",
        """Data2VecTextForQuestionAnswering""",
        """Data2VecTextForSequenceClassification""",
        """Data2VecTextForTokenClassification""",
        """Data2VecTextModel""",
        """Data2VecTextPreTrainedModel""",
    ]
    __UpperCamelCase["""modeling_data2vec_vision"""] = [
        """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecVisionForImageClassification""",
        """Data2VecVisionForMaskedImageModeling""",
        """Data2VecVisionForSemanticSegmentation""",
        """Data2VecVisionModel""",
        """Data2VecVisionPreTrainedModel""",
    ]

if is_tf_available():
    __UpperCamelCase["""modeling_tf_data2vec_vision"""] = [
        """TFData2VecVisionForImageClassification""",
        """TFData2VecVisionForSemanticSegmentation""",
        """TFData2VecVisionModel""",
        """TFData2VecVisionPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys

    # Register the lazy module. The original passed the undefined
    # `_import_structure` (NameError at import) and bound the result to an
    # unused variable instead of sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], __UpperCamelCase, module_spec=__spec__)
| 80 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the `List`/`Tuple` annotations assume a typing
# import that is not visible in this fragment; also `_lowercase` is rebound
# immediately below, so the logger binding is lost — the archive map wins.
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
    """google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCamelCase__( PretrainedConfig ):
    """PEGASUS model configuration.

    Restored from scrambled placeholders: the base class was the undefined
    ``_lowerCAmelCase`` (PretrainedConfig is the only config base imported
    above), the three class attributes were all bound to ``__magic_name__``
    (so only the last survived), and the constructor declared every
    parameter as ``lowerCAmelCase`` — a duplicate-argument SyntaxError.
    Parameter names are recovered from the attributes the body assigns and
    from the kwargs ``super().__init__`` forwards.
    """

    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias expected by the common configuration machinery."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias expected by the common configuration machinery."""
        return self.d_model
| 210 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths of all .py/.ipynb files under *top_dir*.

    Skips hidden/underscore-prefixed directories, any ``scripts`` directory and
    ``__init__.py`` files.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk does not descend into skipped directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Markdown prefix for nesting depth *i* (depth 0 becomes a heading)."""
    return f'{i * " "}*' if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headings for the directory components that changed; return *new_path*."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i)} {new_part.replace("_", " ").title()}')
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index (headings + links) of all good files under *top_dir*."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 80 | 0 |
"""Min-heap of named nodes keyed on ``val``, with O(log n) decrease-key."""


class Node:
    """A named value stored in the heap."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # heap ordering compares values only
        return self.val < other.val


class MinHeap:
    """Array-backed binary min-heap.

    ``idx_of_element`` maps each Node to its index in ``heap`` (needed for
    decrease-key); ``heap_dict`` maps a node's name to its current value so
    ``heap[name]`` lookups work.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, last parent first
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds (min-heapify)."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # keep the node -> index map in sync with the swap
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k): ways to choose k items from n.

    Raises:
        ValueError: if ``n < k`` or ``k < 0``.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 80 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the checkpoint layout: a Longformer
    backbone plus a 2-label QA output head."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires a forward; never called here
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Convert a Lightning QA checkpoint into a HF `LongformerForQuestionAnswering` dump.

    Args:
        longformer_model: HF model identifier of the Longformer backbone.
        longformer_question_answering_ckpt_path: path to the Lightning .ckpt file.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    # load the backbone and rebuild the Lightning module around it
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 611 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random 10-element array and a random target sum."""
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Return a sorted triplet from *arr* summing to *target*, else (0, 0, 0).

    Brute force over all 3-permutations: O(n^3).
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Return a triplet from *arr* summing to *target*, else (0, 0, 0).

    Sort + two pointers: O(n^2). NOTE: sorts *arr* in place.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 80 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first *n* lines (rstripped) of each file in *src_dir* to a
    same-named file in *dest_dir*, creating *dest_dir* if needed."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
| 185 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class __UpperCamelCase ( RagRetriever ):
    """A RAG retriever that loads the index only on the main worker and uses
    ``torch.distributed`` gather/scatter (over a dedicated gloo group) to serve
    retrieval results to all workers."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        # defer index loading: only the main worker initializes retrieval
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Create the retrieval process group and load the index on the main worker."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        # every rank allocates the destination tensor; rank 0 supplies the chunks
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """Gather all workers' queries on rank 0, retrieve there, then scatter
        doc ids and embeddings back to each worker."""
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 80 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : Dict , snake_case : List[str] , snake_case : str )-> Optional[int]:
def count_of_possible_combinations(snake_case : Optional[Any] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case )
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : Any , snake_case : Any )-> Any:
def count_of_possible_combinations_with_dp_array(
snake_case : int , snake_case : Union[str, Any] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCamelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case )
for item in array )
_lowerCamelCase = answer
return answer
_lowerCamelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( snake_case : Dict , snake_case : Optional[int] , snake_case : List[Any] )-> Dict:
_lowerCamelCase = [0] * (target + 1)
_lowerCamelCase = 1
for i in range(1 , target + 1 ):
for j in range(snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Union[str, Any] =3
A_ : Any =5
A_ : Any =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 650 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __UpperCamelCase ( SchedulerMixin, ConfigMixin ):
    """Fourth-order improved pseudo numerical methods (iPNDM) diffusion scheduler.

    See https://arxiv.org/pdf/2202.09778.pdf, mainly formulas (9), (12), (13)
    and Algorithm 2.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # populates betas/alphas/timesteps for the default schedule
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Build the discrete timestep schedule used at inference time."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        # reset the running buffer of model outputs
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Predict the sample at the previous timestep (multi-step linear method)."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams-Bashforth-style combination; order grows with available history
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """No input scaling is required for this scheduler."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """One update step: invert the current (alpha, sigma) mix, re-mix at the next step."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # guard against division by ~0 when alpha is tiny
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 80 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class A_ ( DiffusionPipeline ):
    """Text-guided inpainting: CLIPSeg segments the region described by
    *text*, then Stable Diffusion inpaints that region from *prompt*."""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        # patch outdated scheduler configs in place, with a deprecation notice
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in slices to reduce peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Go back to computing attention in one step."""
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # segment the region described by `text` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 652 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    """Singly-linked node holding one stack item."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list; `top` is the most recent item."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # iterate from most- to least-recently pushed
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop all items."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 80 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal-gas pressure P = nRT / V.

    Raises:
        ValueError: if any argument is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal-gas volume V = nRT / P.

    Raises:
        ValueError: if any argument is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 103 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# module-level flag (unused in this chunk); presumably gates optional test behavior — TODO confirm
__UpperCamelCase : Union[str, Any] = False
class __UpperCamelCase ( unittest.TestCase ):
    # Intentionally empty: placeholder for the fast (CPU) pipeline test suite.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for the VersatileDiffusion pipeline."""

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Saving then reloading the pipeline must reproduce the same images."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Smoke-test all three entry points against reference output slices."""
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 80 | 0 |
def __magic_name__ ( lowercase = 1000 ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Optional[int] = -1
lowercase_ : int = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowercase_ : List[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase_ : Any = n - a - b
if c * c == (a * a + b * b):
lowercase_ : Tuple = a * b * c
if candidate >= product:
lowercase_ : Tuple = candidate
return product
if __name__ == "__main__":
print(F'''{solution() = }''') | 458 |
from __future__ import annotations
from collections.abc import MutableSequence
class __UpperCamelCase :
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : MutableSequence[float] ) -> None:
"""simple docstring"""
if len(_lowerCAmelCase ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
__lowercase = list(_lowerCAmelCase )
__lowercase = degree
def __add__( self : Optional[int] , _lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
if self.degree > polynomial_a.degree:
__lowercase = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _lowerCAmelCase )
else:
__lowercase = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _lowerCAmelCase )
def __sub__( self : int , _lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ) -> Polynomial:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Optional[int] , _lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
__lowercase = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _lowerCAmelCase )
def _a ( self : Optional[int] , _lowerCAmelCase : int | float ) -> int | float:
"""simple docstring"""
__lowercase = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Dict ) -> str:
"""simple docstring"""
__lowercase = """"""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowerCAmelCase )
return polynomial
def __repr__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.__str__()
def _a ( self : List[str] ) -> Polynomial:
"""simple docstring"""
__lowercase = [0] * self.degree
for i in range(self.degree ):
__lowercase = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : int | float = 0 ) -> Polynomial:
"""simple docstring"""
__lowercase = [0] * (self.degree + 2)
__lowercase = constant
for i in range(self.degree + 1 ):
__lowercase = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _lowerCAmelCase )
def __eq__( self : List[str] , _lowerCAmelCase : object ) -> bool:
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Dict , _lowerCAmelCase : object ) -> bool:
"""simple docstring"""
return not self.__eq__(_lowerCAmelCase )
| 80 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): the obfuscator collapsed two distinct module-level names onto
# ``_UpperCAmelCase`` — the dict below immediately clobbers the logger binding;
# the two names should be distinct (logger + pretrained-config map).
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config.json URL.
_UpperCAmelCase : Dict = {
    """SCUT-DLVCLab/lilt-roberta-en-base""": (
        """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
    ),
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration for a LiLT (Language-independent Layout Transformer) model.

    Stores the hyper-parameters of the text encoder plus the layout branch.
    All arguments are optional; the defaults correspond to the
    ``SCUT-DLVCLab/lilt-roberta-en-base`` architecture.

    NOTE(review): the obfuscated original repeated one parameter name for every
    argument (a SyntaxError) and inherited from an undefined placeholder; the
    parameter names below are reconstructed from the attribute assignments in
    the body, and the base class is the ``PretrainedConfig`` imported at the
    top of this file.
    """

    UpperCAmelCase__ = 'lilt'

    def __init__(
        self,
        vocab_size=30522,           # size of the token vocabulary
        hidden_size=768,            # dimension of the encoder layers
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,     # feed-forward inner dimension
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,     # shrink factor of the layout channel
        max_ad_position_embeddings=1024,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 295 |
def snake_case ( lowerCamelCase ):
    """Counting sort.

    Returns a new, ascending-sorted list of the integers in ``lowerCamelCase``.
    The sort is stable and runs in O(n + k) time, where k is the value range.
    """
    collection = lowerCamelCase  # keep the (obfuscated) public parameter name
    if collection == []:
        return []

    # Gather size and value range of the collection.
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # Count how often each value occurs.
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    for number in collection:
        counting_arr[number - coll_min] += 1

    # Prefix sums: counting_arr[i] now says how many elements are <= i + coll_min.
    # (The obfuscated original iterated ``range(1, <input>)`` — wrong bound.)
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # Place the elements from the end to keep the sort stable.
    ordered = [0] * coll_len
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


# Canonical alias used by the demo below (the original file bound both
# functions to clashing obfuscated names).
counting_sort = snake_case


def counting_sort_string(string):
    """Sort the characters of ``string`` via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 80 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE__ :
    '''Helper that builds tiny CTRL configs and random inputs for the tests below.

    NOTE(review): this block is machine-obfuscated and not runnable as-is —
    every ``__init__`` parameter shares the name ``lowerCamelCase__`` (a
    duplicate-argument SyntaxError), and each ``A : <type> = value`` statement
    rebinds a single local ``A`` where the original presumably wrote
    ``self.<attr> = value`` or a named local (the names read later, e.g.
    ``self.parent`` and ``input_ids``, are never assigned). The original
    identifiers must be restored before use.
    '''
    def __init__( self, lowerCamelCase__, lowerCamelCase__=14, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=4, lowerCamelCase__=None, ):
        A : Union[str, Any] = parent
        A : Optional[Any] = batch_size
        A : List[Any] = seq_length
        A : Tuple = is_training
        A : Optional[Any] = use_token_type_ids
        A : List[str] = use_input_mask
        A : List[str] = use_labels
        A : Any = use_mc_token_ids
        A : Optional[Any] = vocab_size
        A : str = hidden_size
        A : str = num_hidden_layers
        A : int = num_attention_heads
        A : List[Any] = intermediate_size
        A : Dict = hidden_act
        A : Any = hidden_dropout_prob
        A : int = attention_probs_dropout_prob
        A : str = max_position_embeddings
        A : Optional[int] = type_vocab_size
        A : Union[str, Any] = type_sequence_label_size
        A : Dict = initializer_range
        A : Union[str, Any] = num_labels
        A : Any = num_choices
        A : Any = scope
        A : Any = self.vocab_size - 1
    # Presumably prepare_config_and_inputs() in the original source.
    def _lowerCAmelCase ( self ):
        A : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        A : Tuple = None
        if self.use_input_mask:
            A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        A : Dict = None
        if self.use_token_type_ids:
            A : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        A : Any = None
        if self.use_mc_token_ids:
            A : Optional[Any] = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
        A : Union[str, Any] = None
        A : Union[str, Any] = None
        A : Any = None
        if self.use_labels:
            A : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
            A : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            A : List[Any] = ids_tensor([self.batch_size], self.num_choices )
        A : Tuple = self.get_config()
        A : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    # Presumably get_config(): a tiny CTRLConfig sized by the fields above.
    def _lowerCAmelCase ( self ):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    # Presumably create_and_check_ctrl_model().
    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
        A : int = CTRLModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, head_mask=_lowerCAmelCase )
        model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase )
        A : Dict = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
    # Presumably create_and_check_lm_head_model().
    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
        A : str = CTRLLMHeadModel(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        A : Any = model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, labels=_lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    # Presumably prepare_config_and_inputs_for_common().
    def _lowerCAmelCase ( self ):
        A : Tuple = self.prepare_config_and_inputs()
        (
            (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) ,
        ) : Dict = config_and_inputs
        A : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
    # Presumably create_and_check_ctrl_for_sequence_classification().
    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
        A : Optional[Any] = self.num_labels
        A : str = CTRLForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        A : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
        A : Dict = model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    '''Common-suite tests for the CTRL model family.

    NOTE(review): machine-obfuscated — the three ``_lowerCAmelCase`` base
    classes are undefined placeholders (presumably ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin from the imports above),
    every method shares the name ``_lowerCAmelCase`` so only the last def
    under that name survives, and the ``A : <type> = value`` statements rebind
    one local instead of the attributes read later
    (``self.model_tester`` / ``self.config_tester``).
    '''
    __lowerCamelCase : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    __lowerCamelCase : Optional[int] = (CTRLLMHeadModel,) if is_torch_available() else ()
    __lowerCamelCase : List[Any] = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __lowerCamelCase : Dict = True
    __lowerCamelCase : Union[str, Any] = False
    __lowerCamelCase : str = False
    # Presumably is_pipeline_test_to_skip(); returns True to skip a pipeline test.
    def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    # Presumably setUp().
    def _lowerCAmelCase ( self ):
        A : List[Any] = CTRLModelTester(self )
        A : Tuple = ConfigTester(self, config_class=_lowerCAmelCase, n_embd=37 )
    # Presumably tearDown().
    def _lowerCAmelCase ( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def _lowerCAmelCase ( self ):
        self.config_tester.run_common_tests()
    def _lowerCAmelCase ( self ):
        A : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
    def _lowerCAmelCase ( self ):
        A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def _lowerCAmelCase ( self ):
        pass
    @slow
    def _lowerCAmelCase ( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A : Tuple = CTRLModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
    @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
    def _lowerCAmelCase ( self ):
        pass
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Slow integration test: greedy generation from the pretrained "ctrl" checkpoint.

    NOTE(review): machine-obfuscated — both methods share the name
    ``_lowerCAmelCase`` (only the second survives), the ``A : <type> = value``
    statements clobber one local where the original bound distinct names
    (``model`` / ``input_ids`` / ``expected_output_ids`` / ``output_ids``),
    and ``_lowerCAmelCase`` inside the bodies stands for the original call
    arguments (presumably ``torch_device`` and ``do_sample=False``).
    '''
    # Presumably tearDown().
    def _lowerCAmelCase ( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def _lowerCAmelCase ( self ):
        A : Optional[int] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(_lowerCAmelCase )
        A : Dict = torch.tensor(
            [[1_1859, 0, 1611, 8]], dtype=torch.long, device=_lowerCAmelCase ) # Legal the president is
        A : Optional[int] = [
            1_1859,
            0,
            1611,
            8,
            5,
            150,
            2_6449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            2_0740,
            24_6533,
            24_6533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        A : Union[str, Any] = model.generate(_lowerCAmelCase, do_sample=_lowerCAmelCase )
        self.assertListEqual(output_ids[0].tolist(), _lowerCAmelCase )
| 662 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
    """Helper that builds tiny ConvNext configs and random inputs for the tests below.

    NOTE(review): machine-obfuscated and not runnable as-is — every
    ``__init__`` parameter shares the name ``_lowerCAmelCase`` (a
    duplicate-argument SyntaxError), every ``__lowercase = value`` statement
    rebinds one local where the original presumably wrote
    ``self.<attr> = value`` (the attributes read later, e.g. ``self.parent``
    and ``self.batch_size``, are never assigned), and three methods share the
    name ``_a``. The original identifiers must be restored before use.
    """
    def __init__( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : str=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Optional[int]=[10, 20, 30, 40] , _lowerCAmelCase : Optional[Any]=[2, 2, 3, 2] , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : str=["stage2", "stage3", "stage4"] , _lowerCAmelCase : Dict=[2, 3, 4] , _lowerCAmelCase : Tuple=None , ) -> Any:
        """simple docstring"""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = num_channels
        __lowercase = num_stages
        __lowercase = hidden_sizes
        __lowercase = depths
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = num_labels
        __lowercase = initializer_range
        __lowercase = out_features
        __lowercase = out_indices
        __lowercase = scope
    # Presumably prepare_config_and_inputs().
    def _a ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] , self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels
    # Presumably get_config(): a tiny ConvNextConfig sized by the fields above.
    def _a ( self : List[str] ) -> Any:
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    # Presumably create_and_check_model().
    def _a ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    # Presumably create_and_check_for_image_classification().
    def _a ( self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextForImageClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Presumably create_and_check_backbone().
    def _a ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = ConvNextBackbone(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        __lowercase = None
        __lowercase = ConvNextBackbone(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    # Presumably prepare_config_and_inputs_for_common().
    def _a ( self : List[str] ) -> List[str]:
        """simple docstring"""
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-suite tests for the ConvNext model family.

    NOTE(review): machine-obfuscated — the ``_lowerCAmelCase`` base classes are
    undefined placeholders (presumably ModelTesterMixin and
    PipelineTesterMixin from the imports above), every method shares the name
    ``_a`` so only the last def under that name survives, and the
    ``__lowercase = value`` statements rebind one local instead of the names
    read later (``self.model_tester``, ``config``, ``inputs_dict``, …).
    """
    __snake_case :Optional[Any] = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    __snake_case :List[str] = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    __snake_case :str = True
    __snake_case :Any = False
    __snake_case :Any = False
    __snake_case :Any = False
    __snake_case :int = False
    # Presumably setUp().
    def _a ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextModelTester(self )
        __lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
    def _a ( self : Optional[Any] ) -> int:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _a ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def _a ( self : List[Any] ) -> Any:
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def _a ( self : Dict ) -> int:
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def _a ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        pass
    def _a ( self : Tuple ) -> Tuple:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    def _a ( self : Any ) -> List[str]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )
    def _a ( self : Any ) -> Optional[int]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
    def _a ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        def check_hidden_states_output(_lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
            __lowercase = model_class(_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
            __lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __lowercase = self.model_tester.num_stages
            self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    def _a ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
    @slow
    def _a ( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = ConvNextModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
def snake_case ( ):
    """Load and return the standard COCO test image used by the vision tests.

    The obfuscated original assigned the opened image to a throwaway name and
    then returned the undefined name ``image``; bind it properly.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: classify the COCO test image with convnext-tiny-224.

    NOTE(review): machine-obfuscated — the ``__lowercase = value`` statements
    rebind one local instead of the names read later (``image_processor``,
    ``model``, ``inputs``, ``outputs``), and ``_lowerCAmelCase`` stands for
    the original call arguments (presumably ``torch_device`` and the locals
    above). Restore the identifiers before running.
    """
    @cached_property
    def _a ( self : Tuple ) -> Any:
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def _a ( self : str ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_lowerCAmelCase )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**_lowerCAmelCase )
        # verify the logits
        __lowercase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
        __lowercase = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
    """Backbone-specific test suite for ConvNextBackbone.

    NOTE(review): machine-obfuscated — ``_lowerCAmelCase`` is presumably the
    BackboneTesterMixin imported above, and the ``__lowercase`` assignment in
    ``_a`` (presumably setUp) should bind ``self.model_tester``. The method
    body may also be truncated in this copy — TODO confirm against upstream.
    """
    __snake_case :Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
    __snake_case :str = ConvNextConfig
    __snake_case :Optional[Any] = False
    def _a ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase = ConvNextModelTester(self )
# Baconian cipher lookup table: each letter (and the space) maps to a
# five-symbol code over the alphabet {A, B}.
# NOTE: "j" and "v" get the otherwise-unused codes "BBBAA"/"BBBAB" so the table
# is invertible (historically i/j and u/v shared a code).
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

# Inverse table used by decode().
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word):
    """Baconian-encode ``word`` (letters and spaces only, case-insensitive).

    Raises:
        Exception: if ``word`` contains any other character.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded


def decode(coded):
    """Decode a Baconian-encoded string; words are separated by spaces.

    Raises:
        Exception: if ``coded`` contains anything but 'A', 'B' and spaces.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        # Consume the word five symbols at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


# Backward-compatible aliases for the obfuscated module-level names: in the
# original file both tables (and both functions) were bound to a single
# clashing name, so only the last binding of each survived.
snake_case__ = decode_dict
snake_case_ = decode


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 402 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# Model-type -> Flax model class name tables.  Fixed: every table below was
# bound to one obfuscated name while the `_LazyAutoMapping` calls and the auto
# classes further down reference the canonical FLAX_MODEL_* names (NameError).
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy config-class -> model-class mappings consumed by the auto classes below.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto classes.  Fixed: the obfuscated source named every class
# `__UpperCamelCase` while each `auto_class_update(...)` call references the
# canonical class name (NameError); `_model_mapping` is the attribute
# `_BaseAutoModelClass` reads, restored from the obfuscated `__snake_case`.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 80 | 0 |
"""Count "hybrid-integers" p**q * q**p (p, q distinct primes) below base**degree.

Fixed from the obfuscated source: `math.loga` does not exist (should be `log2`),
the second function declared two parameters with the same name (SyntaxError),
and the bodies called `calculate_prime_numbers` / `solution`, which were never
defined under those names.
"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below *max_number* (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 80_08_00, degree: int = 80_08_00) -> int:
    """Count pairs of distinct primes (p, q), p < q, with p**q * q**p <= base**degree.

    Works in log space:  p**q * q**p <= base**degree
    <=>  q*log2(p) + p*log2(q) <= degree*log2(base).
    Since that bound caps both primes, sieving up to int(upper_bound) suffices.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer scan: the log-space weight grows with both primes, so for each
    # `left` shrink `right` until the pair fits; every index in (left, right]
    # then also fits.
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 210 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: public symbols exposed by this module, per submodule.
# Fixed: the dict was bound to an obfuscated name while `_LazyModule` below
# received the undefined `_import_structure`, and the TYPE_CHECKING import
# used garbled module/class names that do not match the structure.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Static type checkers / IDEs see the real import.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the tokenizer is
    # only imported on first attribute access (standard transformers pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = str(_SCREAMING_SNAKE_CASE )
_snake_case = [n]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
if len(str(_SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 11 ):
_snake_case = []
_snake_case = 13
while len(_SCREAMING_SNAKE_CASE ) != count:
if validate(_SCREAMING_SNAKE_CASE ):
_snake_case = list_truncated_nums(_SCREAMING_SNAKE_CASE )
if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ):
list_truncated_primes.append(_SCREAMING_SNAKE_CASE )
num += 2
return list_truncated_primes
def __SCREAMING_SNAKE_CASE ( ):
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''') | 585 |
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase : Union[str, Any] = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
__UpperCamelCase : List[str] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
__UpperCamelCase : Tuple = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCamelCase(datasets.Metric):
    """Matthews correlation coefficient metric (wraps sklearn's matthews_corrcoef).

    Fixed: both hook methods were named `_a` (the second shadowed the first);
    `datasets.Metric` dispatches to `_info` and `_compute`, restored here.
    """

    def _info(self):
        # Metric metadata plus the feature schema `compute()` expects.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        # sklearn signature is (y_true, y_pred, ...): references first.
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 80 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config URL.  Fixed: both assignments
# were bound to one obfuscated name, so the logger was immediately shadowed
# by the archive map.
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a(PretrainedConfig):
    """Configuration for GPT-NeoX models.

    Fixed from the obfuscated source: every `__init__` parameter shared one
    name (SyntaxError), the body bound locals instead of instance attributes,
    the base class name was garbled (`PretrainedConfig` is imported above), and
    `self._rope_scaling_validation()` was called while the method carried an
    obfuscated name.
    """

    # PretrainedConfig machinery keys on `model_type`.
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!" )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: None, or {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 611 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; `save_vocabulary` below calls `logger.error` / `logger.warning`,
# so the binding must be named `logger` (it was bound to an obfuscated name).
logger = logging.get_logger(__name__)
# Tokenizer resource tables.  Fixed: the class below reads the canonical
# constant names, but all three dicts were bound to one obfuscated name.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a sequence of symbols).

    Fixed from the obfuscated source: the function must be named `get_pairs`
    (the tokenizer's `bpe` method calls it by that name), its locals were
    scrambled, and the final reassignment returned `set(word)` — the set of
    characters — instead of the accumulated pair set.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __UpperCamelCase(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall: lower-cases input, splits punctuation,
    and joins sub-word units with "@@ " continuation markers.

    Fixed from the obfuscated source: the base class name was garbled
    (`PreTrainedTokenizer` is imported above), the four class attributes the
    tokenizer machinery reads all shared one obfuscated name, `__init__`
    declared duplicate parameter names (SyntaxError), and method bodies read
    the original local names while locals were bound to throwaway names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is the "#version" header; last entry is a trailing blank.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Earlier merge -> lower rank -> higher priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary (base plus added tokens)."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to *token*; returns space-separated sub-words."""
        if token in self.cache:
            return self.cache[token]
        # Split off punctuation and apostrophes, collapse runs of whitespace.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the final symbol as word-terminal for the merge table.
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily merge the lowest-ranked (earliest-learned) bigram.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # Strip the trailing "</w>" marker.
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into BPE sub-word tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its vocabulary id (unk id when missing)."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map a vocabulary id back to its token string (unk token when missing)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-word tokens back into plain text, removing "@@ " markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 80 | 0 |
"""Helpers to fetch and unpack artifacts from the latest scheduled (daily) CI run.

Fixed from the obfuscated source: all four functions shared one name (each
shadowing the previous) while the bodies call `get_daily_ci_runs`,
`get_last_daily_ci_runs` and `get_last_daily_ci_artifacts`; the first function
also declared duplicate parameter names (SyntaxError).
"""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the most recent *num_runs* scheduled workflow runs on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into *output_dir*."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) matches the upstream helper's
        # keyword spelling -- confirm against get_ci_error_statistics.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the named artifacts; return {artifact: {filename: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 185 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config URL.  Fixed: both assignments
# shared one obfuscated name, so the logger was immediately shadowed.
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __UpperCamelCase(PretrainedConfig):
    """Configuration for LXMERT (cross-modal language/vision transformer).

    Fixed from the obfuscated source: the base class name was garbled
    (`PretrainedConfig` is imported above), every `__init__` parameter shared
    one name (SyntaxError), and the body bound locals instead of instance
    attributes.
    """

    # PretrainedConfig machinery keys on `model_type`.
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Layer counts per sub-encoder (vision / cross-modal / language).
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 80 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted, directed edge used by the 0-1 BFS graph below.

    BUG FIX: the original named this class ``__a`` — immediately shadowed by
    the graph class of the same name — while the graph code constructs
    ``Edge(...)`` and reads ``edge.destination_vertex`` / ``edge.weight``;
    it also declared both fields under one name, so only one survived.
    Restored to match its users.
    """

    destination_vertex: int  # index of the vertex this edge points at
    weight: int  # 0 or 1; validated by the graph's add_edge
class __a :
def __init__( self , a__ ):
_lowerCamelCase = [[] for _ in range(_lowerCAmelCase )]
_lowerCamelCase = size
def __getitem__( self , a__ ):
return iter(self._graph[vertex] )
@property
def snake_case_ ( self ):
return self._size
def snake_case_ ( self , a__ , a__ , a__ ):
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(_lowerCAmelCase , _lowerCAmelCase ) )
def snake_case_ ( self , a__ , a__ ):
_lowerCamelCase = deque([start_vertex] )
_lowerCamelCase = [None] * self.size
_lowerCamelCase = 0
while queue:
_lowerCamelCase = queue.popleft()
_lowerCamelCase = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_lowerCamelCase = current_distance + edge.weight
_lowerCamelCase = distances[edge.destination_vertex]
if (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
_lowerCamelCase = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 650 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __UpperCamelCase:
    """Builds a tiny DistilBert config plus random inputs for the tests below.

    BUG FIX: the original subclassed the undefined name ``_lowerCAmelCase``,
    declared every ``__init__`` parameter as ``_lowerCAmelCase`` (a
    SyntaxError), named every method ``_a`` (each shadowing the last) and
    bound every value to a throwaway local instead of ``self``. Parameter
    names are reconstructed from the attribute-assignment order; method names
    from the call sites in the test class below (``prepare_config_and_inputs``,
    ``create_and_check_distilbert_*``, ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config and random ids/mask/labels sized by this tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Map this tester's hyper-parameters onto DistilBert's config names."""
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the bare model (with and without mask) and check the output shape."""
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the masked-LM head and check the logits shape."""
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the QA head and check the start/end logits shapes."""
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the sequence-classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the token-classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the multiple-choice head on choice-expanded inputs and check shape."""
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # expand (batch, seq) -> (batch, num_choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model/pipeline test-suite wiring for DistilBert.

    NOTE(review): identifiers appear machine-obfuscated. The two
    ``_lowerCAmelCase`` bases are undefined at module level (and duplicated,
    which would raise at class creation), the four ``__snake_case`` flags
    overwrite one another, and every test method is named ``_a`` so only the
    last definition survives — restore original names before relying on this.
    """
    # presumably the `all_model_classes` tuple for the common model tests
    __snake_case :Optional[Any] = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # presumably the pipeline-task -> model-class mapping
    __snake_case :Dict = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # four boolean tester flags; each assignment shadows the previous one
    __snake_case :Tuple = True
    __snake_case :Tuple = True
    __snake_case :List[str] = True
    __snake_case :Optional[int] = True
    def _a ( self : Optional[Any] ) -> List[Any]:
        """Build the model-tester and config-tester helpers.

        NOTE(review): both land in the same throwaway local rather than on
        ``self``, yet later methods read ``self.model_tester`` /
        ``self.config_tester`` — confirm against the un-obfuscated original.
        """
        __lowercase = DistilBertModelTester(self )
        __lowercase = ConfigTester(self , config_class=_lowerCAmelCase , dim=37 )
    def _a ( self : Dict ) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _a ( self : Optional[Any] ) -> int:
        """Exercise the bare-model shape check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*_lowerCAmelCase )
    def _a ( self : Tuple ) -> Union[str, Any]:
        """Exercise the masked-LM head check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCAmelCase )
    def _a ( self : Optional[Any] ) -> Dict:
        """Exercise the question-answering head check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCAmelCase )
    def _a ( self : str ) -> Optional[Any]:
        """Exercise the sequence-classification head check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCAmelCase )
    def _a ( self : Any ) -> Union[str, Any]:
        """Exercise the token-classification head check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCAmelCase )
    def _a ( self : List[str] ) -> List[Any]:
        """Exercise the multiple-choice head check."""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCAmelCase )
    @slow
    def _a ( self : int ) -> Optional[Any]:
        """Load the first pretrained checkpoint from the archive list and
        assert something non-None came back."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = DistilBertModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
    @slow
    @require_torch_gpu
    def _a ( self : Union[str, Any] ) -> Optional[Any]:
        """Trace each model class with TorchScript, save/reload the trace,
        and run the reloaded module once."""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            __lowercase = True
            __lowercase = model_class(config=_lowerCAmelCase )
            __lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
            __lowercase = torch.jit.trace(
                _lowerCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """traced_model.pt""" ) )
                __lowercase = torch.jit.load(os.path.join(_lowerCAmelCase , """traced_model.pt""" ) , map_location=_lowerCAmelCase )
                loaded(inputs_dict["""input_ids"""].to(_lowerCAmelCase ) , inputs_dict["""attention_mask"""].to(_lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test for the pretrained distilbert-base-uncased model.

    NOTE(review): every value is bound to the same throwaway local
    ``__lowercase`` while later lines read ``model`` / ``output`` and pass the
    undefined name ``_lowerCAmelCase`` — identifiers look machine-obfuscated,
    and the method name ``_a`` will not be discovered by unittest.
    """
    @slow
    def _a ( self : List[Any] ) -> Union[str, Any]:
        """Run a fixed 11-token input through the model and compare a 3x3
        slice of the hidden state against hard-coded values (atol 1e-4)."""
        __lowercase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        __lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
        __lowercase = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , _lowerCAmelCase )
        __lowercase = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) )
| 80 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
# Make the shared test utilities (tests/utils/) importable before importing
# from them below.
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402
# Directory holding the fixture files used by the tests below.
a =get_tests_dir("""fixtures""")
class A_ ( unittest.TestCase ):
    """Image-processor loading edge cases: offline cache fallback, loading
    from a raw config URL, and loading from a repo subfolder.

    NOTE(review): all three test methods share the name ``lowerCAmelCase``
    (each definition shadows the previous), intermediates are bound to one
    throwaway local, and several call sites pass the undefined name
    ``_lowerCAmelCase`` — identifiers look machine-obfuscated.
    """
    def lowerCAmelCase ( self : Optional[int]):
        """Cached files must be served when the Hub request returns HTTP 500."""
        __lowerCamelCase : List[Any] = mock.Mock()
        __lowerCamelCase : Optional[int] = 5_0_0
        __lowerCamelCase : List[str] = {}
        __lowerCamelCase : Optional[Any] = HTTPError
        __lowerCamelCase : List[str] = {}
        # Download this model to make sure it's in the cache.
        __lowerCamelCase : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' ,return_value=_lowerCAmelCase) as mock_head:
            __lowerCamelCase : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()
    def lowerCAmelCase ( self : Tuple):
        """An image processor must load directly from a preprocessor_config URL."""
        __lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
    def lowerCAmelCase ( self : str):
        """Loading a config stored in a repo subfolder requires `subfolder=`."""
        with self.assertRaises(_lowerCAmelCase):
            # config is in subfolder, the following should not work without specifying the subfolder
            __lowerCamelCase : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
        __lowerCamelCase : Any = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants' ,subfolder='feature_extractor')
        self.assertIsNotNone(_lowerCAmelCase)
@is_staging_test
class A_ ( unittest.TestCase ):
    """Staging-endpoint tests: pushing image processors to the Hub under a
    user namespace, an org namespace, and with dynamic (custom) code.

    NOTE(review): every method shares the name ``lowerCAmelCase`` (later
    definitions shadow earlier ones, so the class set-up/tear-down hooks are
    lost), values are bound to a throwaway local, and several calls pass the
    undefined name ``_lowerCAmelCase`` — identifiers look machine-obfuscated.
    """
    @classmethod
    def lowerCAmelCase ( cls : Any):
        """Save the test token (presumably the original setUpClass).

        NOTE(review): the token lands in a throwaway local, yet the tests
        below read ``cls._token`` — confirm against the original.
        """
        __lowerCamelCase : Dict = TOKEN
        HfFolder.save_token(_lowerCAmelCase)
    @classmethod
    def lowerCAmelCase ( cls : Any):
        """Best-effort deletion of the repos the tests may have created."""
        try:
            delete_repo(token=cls._token ,repo_id='test-image-processor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass
    def lowerCAmelCase ( self : Optional[Any]):
        """Round-trip push/pull of an image processor under the user namespace,
        via push_to_hub and via save_pretrained(push_to_hub=...)."""
        __lowerCamelCase : Any = ViTImageProcessor.from_pretrained(_lowerCAmelCase)
        image_processor.push_to_hub('test-image-processor' ,use_auth_token=self._token)
        __lowerCamelCase : Optional[Any] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase))
        # Reset repo
        delete_repo(token=self._token ,repo_id='test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                _lowerCAmelCase ,repo_id='test-image-processor' ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token)
        __lowerCamelCase : Any = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase))
    def lowerCAmelCase ( self : Any):
        """Same round-trip as above, but pushing into an org namespace."""
        __lowerCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(_lowerCAmelCase)
        image_processor.push_to_hub('valid_org/test-image-processor' ,use_auth_token=self._token)
        __lowerCamelCase : int = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase))
        # Reset repo
        delete_repo(token=self._token ,repo_id='valid_org/test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                _lowerCAmelCase ,repo_id='valid_org/test-image-processor-org' ,push_to_hub=_lowerCAmelCase ,use_auth_token=self._token)
        __lowerCamelCase : List[Any] = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCAmelCase ,getattr(_lowerCAmelCase ,_lowerCAmelCase))
    def lowerCAmelCase ( self : str):
        """Push a custom image processor class and reload it with
        trust_remote_code; checks the auto_map entry is written."""
        CustomImageProcessor.register_for_auto_class()
        __lowerCamelCase : Optional[int] = CustomImageProcessor.from_pretrained(_lowerCAmelCase)
        image_processor.push_to_hub('test-dynamic-image-processor' ,use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map ,{'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} ,)
        __lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
            F"{USER}/test-dynamic-image-processor" ,trust_remote_code=_lowerCAmelCase)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ ,'CustomImageProcessor')
| 652 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __UpperCamelCase ( _lowerCAmelCase ):
# to overwrite at feature extractactor specific tests
__snake_case :Optional[int] = None
__snake_case :Dict = None
@property
def _a ( self : str ) -> List[str]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """feature_size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """padding_value""" ) )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
__lowercase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowercase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _a ( self : str , _lowerCAmelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : int ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = self.feat_extract_tester.seq_length_diff
__lowercase = self.feat_extract_tester.max_seq_length + pad_diff
__lowercase = self.feat_extract_tester.min_seq_length
__lowercase = self.feat_extract_tester.batch_size
__lowercase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
__lowercase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" )[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowercase = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , pad_to_multiple_of=10 )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _a ( self : Tuple , _lowerCAmelCase : str=False ) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(_lowerCAmelCase : Tuple ):
__lowercase = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase : Any , _lowerCAmelCase : str ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
__lowercase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""longest""" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowercase = 12
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
__lowercase = input_a[input_name]
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
__lowercase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowercase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
self._check_padding(numpify=_lowerCAmelCase )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
self._check_truncation(numpify=_lowerCAmelCase )
@require_torch
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = feat_extract.pad(_lowerCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.feat_extract_dict
__lowercase = True
__lowercase = self.feature_extraction_class(**_lowerCAmelCase )
__lowercase = self.feat_extract_tester.prepare_inputs_for_common()
__lowercase = [len(_lowerCAmelCase ) for x in speech_inputs]
__lowercase = feat_extract.model_input_names[0]
__lowercase = BatchFeature({input_name: speech_inputs} )
__lowercase = min(_lowerCAmelCase )
__lowercase = feat_extract.pad(
_lowerCAmelCase , padding="""max_length""" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 80 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case ( lowerCAmelCase_ ) -> bool:
    """Return True when every element of *lowerCAmelCase_* is unique.

    Elements must be hashable (they are placed in a ``set``).  The original
    return annotation (``Union[str, Any]``) was wrong: the function returns the
    result of an ``==`` comparison, i.e. a bool.

    >>> snake_case([1, 2, 3])
    True
    >>> snake_case([1, 2, 2])
    False
    """
    return len(set(lowerCAmelCase_ ) ) == len(lowerCAmelCase_ )
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 103 |
def snake_case ( input_string: str , key: int ) -> str:
    """Rail-fence (zigzag) encrypt: write *input_string* down/up across *key*
    rows, then read the rows left-to-right.

    Raises ValueError when ``key <= 0``.  A key of 1, or a string no longer than
    the key, is returned unchanged.

    NOTE(review): reconstructed — the original signature declared two parameters
    both named ``lowerCamelCase`` (a SyntaxError) and every local assignment had
    collapsed onto one name; names restored from the read sites (``key``,
    ``temp_grid`` ...).
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string

    temp_grid = [[] for _ in range(key )]
    lowest = key - 1
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2 )  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )

    return output_string
def snake_case ( input_string: str , key: int ) -> str:
    """Rail-fence (zigzag) decrypt: rebuild the zigzag template to learn each
    row's length, slice the ciphertext into those rows, then read the zigzag.

    Raises ValueError when ``key <= 0``.  A key of 1 returns the input unchanged.

    NOTE(review): reconstructed from a corrupted original (duplicate parameter
    names, clobbered local assignments).
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string

    lowest = key - 1
    temp_grid = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2 )  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("*" )

    counter = 0
    grid = []
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )

    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2 )  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def snake_case ( input_string: str ) -> dict:
    """Try every possible rail count and return ``{key_guess: decryption}``.

    NOTE(review): the corrupted original never stored into the dict (the
    ``results[key_guess] = ...`` target was lost) and passed the ciphertext as
    both arguments to ``decrypt``.  ``decrypt`` is the rail-fence decrypt helper
    defined earlier in this module (its name was also mangled upstream —
    confirm it is exposed as ``decrypt``).
    """
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 80 | 0 |
from ... import PretrainedConfig
# Map of pretrained NeZha checkpoint name -> hosted config URL.
# NOTE(review): restored the constant's real name — the config class below reads
# `NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP`, but an automated rename had turned this
# assignment into `UpperCAmelCase_`.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration class for NeZha models; defaults match `sijunhe/nezha-cn-base`.

    NOTE(review): reconstructed from a corrupted original in which every
    parameter was named ``snake_case__`` (duplicate parameter names are a
    SyntaxError) and every ``self.<attr> = ...`` assignment had collapsed to
    ``lowercase_ : <type> = ...``.  Parameter names and order were recovered
    from the right-hand sides of those assignments; the base class is the
    ``PretrainedConfig`` imported above.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
def snake_case ( lowerCamelCase = 2_000_000 ):
    """Return the sum of all primes strictly below *lowerCamelCase*
    (Project Euler 10) using a sieve of Eratosthenes.

    NOTE(review): reconstructed — the corrupted original assigned the sieve
    marks and the running sum to a single throwaway name (`__lowercase`), so it
    could never have worked; targets restored from the read sites
    (`primality_list`, `sum_of_primes`).
    """
    n = lowerCamelCase
    if n < 3:
        return 0  # there are no primes below 2

    # 0 = potentially prime, 1 = composite (indices 0 and 1 are marked too).
    primality_list = [0] * (n + 1)
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):  # range(n) excludes n itself: "below n"
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # NOTE(review): the original printed `solution()`, a name that does not
    # exist in this file after the automated rename; call the sieve defined
    # directly above instead.
    print(F'''{snake_case() = }''')
| 80 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
# Module-level logger; the distributed test script below logs through it.
# NOTE(review): the corrupted original bound this to `_UpperCAmelCase`, while
# every use site in this file says `logger`.
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """Dataset whose item *i* is simply the integer ``i`` (0 .. length-1).

    NOTE(review): restored from a corrupted original — the class (and two
    others) were all renamed ``lowerCAmelCase``, the constructor parameter was
    renamed while the body still read ``length``, and ``self.length`` had lost
    its target.  The script below instantiates ``DummyDataset``.
    """

    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Returning the index itself lets the test verify ordering end-to-end.
        return i
class DummyDataCollator:
    """Collate a list of integer indices into the tensors the Trainer expects.

    NOTE(review): name restored — the script below references
    ``DummyDataCollator``, which the automated rename had destroyed.
    """

    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    """Identity "model": echoes its inputs so the Trainer's gather logic can be
    verified.  With labels it returns a zero loss plus the inputs.

    NOTE(review): the forward method had been renamed ``A_`` — as ``nn.Module``
    dispatches calls to ``forward``, the corrupted version could never run.
    """

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """Launches this very file under torchrun on 2 Neuron cores.

    NOTE(review): class/method names restored (the method was ``A_``, which
    unittest would never discover as a test).
    """

    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """Launches this very file under torchrun with one process per visible GPU."""

    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # NOTE(review): reconstructed from a corrupted original in which every
    # assignment target had been renamed `_UpperCAmelCase` and `compute_metrics`
    # had become a module-level `SCREAMING_SNAKE_CASE`; names and nesting were
    # restored from the use sites (`training_args`, `dataset`, `trainer`, `p`).
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            # Every rank must contribute its shard; gathered predictions/labels
            # should reproduce 0..len(dataset)-1 exactly, in order.
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run with eval accumulation enabled to exercise that code path too.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs/inputs and runs shape checks for the
    test cases below.

    NOTE(review): the corrupted original named this class ``__UpperCamelCase``
    and every method ``_a``; names were restored from the call sites
    (``self.model_tester.create_and_check_model(...)`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values, optional labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): kwarg really spelled "path_norm" in the original
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # After all stages, sequence length shrinks 4x per merge and width doubles.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError raised for an unknown stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for MaskFormerSwin.

    NOTE(review): reconstructed from a corrupted original — the class and every
    method were renamed (``__UpperCamelCase`` / ``_a``), so unittest could not
    discover a single test.  Test names were restored from the skip reasons and
    the standard ModelTesterMixin layout; names marked "best-guess" below could
    not be recovered exactly.
    """

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common-property checks are covered elsewhere for this model.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    # NOTE(review): best-guess name — the original method name was lost.
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaNs never compare equal to themselves, so t != t selects them.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone tests for MaskFormerSwin (names restored from a corrupted
    original; the mixin reads `all_model_classes` and `config_class`)."""

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are (batch, seq_len, channels), not (batch, channels, h, w).
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 80 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Menu ordering of torch.compile backends; indexed by the user's menu choice
# in `_convert_dynamo_backend` below.
# NOTE(review): restored the constant's real name — the converter reads
# `DYNAMO_BACKENDS`, but an automated rename had turned this assignment into
# `SCREAMING_SNAKE_CASE_`.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt repeatedly until the user supplies a value `convert_value` accepts.

    Returns `default` on empty input (when a default is given).  NOTE(review):
    reconstructed — the original declared four parameters all named
    `_lowerCAmelCase` and never bound the prompt result; also, seven helpers in
    this module shared the name `__UpperCamelCase`, so all but the last were
    shadowed.  Names restored from the accelerate CLI source layout.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Conversion failed: tell the user (if we have a message) and re-prompt.
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """Render a bullet menu for `options` and return the (optionally converted)
    selection.  (Name restored; see note on `_ask_field`.)"""
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    """Map a menu index to a `ComputeEnvironment` member.  (Restored: the
    original bound `int(...)` to a throwaway name and then read an undefined
    `value`.)"""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    """Map a menu index to a `DistributedType` member."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    """Map a menu index into `DYNAMO_BACKENDS` and return the backend's value."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """Map a menu index to a `PrecisionType` member."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    """Map a menu index to a `SageMakerDistributedType` member."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    """Convert a case-insensitive "yes"/"no" answer to a bool.

    Raises KeyError for anything else, which `_ask_field` treats as
    "ask again".  (Restored: the original's parameter was renamed while the
    body still read `value`.)
    """
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom help formatter that strips the generic `<command> [<args>]`
    placeholder from subcommand usage lines.

    NOTE(review): the original `_format_usage` override declared four
    parameters all named `lowerCamelCase__` (a SyntaxError) and never bound the
    super() result; signature restored to match
    `argparse.HelpFormatter._format_usage(usage, actions, groups, prefix)`.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 662 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCamelCase(unittest.TestCase):
    def _a(self):
        """An ``accelerator.prepare``d optimizer must survive a pickle round-trip.

        Fixes vs. original: every intermediate (model, optimizer, prepared
        optimizer) was bound to a throwaway local while the calls referenced
        the undefined name ``_lowerCAmelCase``.
        """
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset the global singleton so subsequent tests start clean.
        AcceleratorState._reset_state()
| 80 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _A(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way answer-category head for
    Natural Questions.

    Fixes vs. original: the base class was the undefined name
    ``_lowerCAmelCase`` (the import above provides
    ``FlaxBigBirdForQuestionAnsweringModule``), the three dataclass fields all
    shared one name so only the last survived, ``setup`` had been renamed so
    flax would never call it, and ``jnp.floataa`` was a mangled
    ``jnp.float32``.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        # flax.linen invokes setup() lazily; the method must keep this name.
        super().setup()
        # 5-way classifier head over the pooled representation.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        # outputs[2] is presumably the pooled output of the QA module —
        # TODO(review): confirm against FlaxBigBirdForQuestionAnsweringModule.
        cls_out = self.cls(outputs[2])
        # (start_logits, end_logits, category_logits)
        return outputs[:2] + (cls_out,)
# NOTE(review): this class is corrupted by an automated rename. The base class
# ``_lowerCAmelCase`` is undefined (presumably ``FlaxBigBirdForQuestionAnswering``
# imported above), and ``FlaxBigBirdForNaturalQuestionsModule`` is defined
# nowhere in this file — it appears to be the original name of the module class
# defined just above, which the rename collapsed to ``_A``. It also shadows that
# previous ``_A``. TODO: confirm against the upstream BigBird NQ training script.
class _A ( _lowerCAmelCase ):
    """Model wrapper whose ``module_class`` points at the NQ module above."""

    # Intended to be the flax ``module_class`` attribute — name mangled.
    _snake_case : Tuple = FlaxBigBirdForNaturalQuestionsModule
def snake_case_(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Mean softmax cross-entropy over the start-, end- and category heads.

    Args:
        start_logits / start_labels: answer-start logits and integer targets.
        end_logits / end_labels: answer-end logits and integer targets.
        pooled_logits / pooled_labels: answer-category logits and targets.

    Returns:
        Scalar loss: the average of the three per-head mean cross-entropies.

    Fixes vs. original: all six parameters shared one name (a SyntaxError) and
    the inner helper referenced undefined locals.
    """

    def cross_entropy(logits, labels, reduction=None):
        """Cross entropy between ``logits`` and integer ``labels``."""
        vocab_size = logits.shape[-1]
        # One-hot encode the integer labels as float32 ("f4").
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    # Always reduce per-head losses with the mean.
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _A :
    """Training hyper-parameters for the BigBird Natural Questions run.

    NOTE(review): corrupted by an automated rename — every field is named
    ``_snake_case``, so as a dataclass only the LAST field survives and the
    method below reads attributes (``base_dir``, ``save_dir``,
    ``batch_size_per_device``) that no longer exist. The field order suggests
    the originals were roughly: model_id, logging_steps, save_steps,
    batch_size_per_device, max_epochs, seed(?), val_batch_size(?), lr,
    init_lr(?), warmup_steps, weight_decay, save_dir, base_dir,
    tr_data_path, val_data_path. TODO: confirm against the upstream script.
    """

    _snake_case : str = "google/bigbird-roberta-base"
    _snake_case : int = 3000
    _snake_case : int = 1_0500
    _snake_case : int = 128
    _snake_case : int = 3
    _snake_case : int = 1
    _snake_case : int = 5
    # tx_args
    _snake_case : float = 3E-5
    _snake_case : float = 0.0
    _snake_case : int = 2_0000
    _snake_case : float = 0.0_0_9_5
    _snake_case : str = "bigbird-roberta-natural-questions"
    _snake_case : str = "training-expt"
    _snake_case : str = "data/nq-training.jsonl"
    _snake_case : str = "data/nq-validation.jsonl"
    def _snake_case ( self : Dict ):
        """Post-init setup: create the experiment directory and derive the
        effective (per-host) batch size.

        NOTE(review): ``exist_ok=_lowerCAmelCase`` references an undefined
        name (presumably ``True``), and the two computed values are bound to
        a throwaway local instead of ``self.save_dir`` / ``self.batch_size``.
        """
        os.makedirs(self.base_dir , exist_ok=_lowerCAmelCase )
        __lowercase = os.path.join(self.base_dir , self.save_dir )
        __lowercase = self.batch_size_per_device * jax.device_count()
@dataclass
class _A :
    """Collates tokenized Natural Questions features into padded jnp batches.

    NOTE(review): corrupted by an automated rename — the two fields share one
    name (only the 4096 default survives) and three of the four methods share
    one name (only the last survives). The bodies read attributes/locals
    (``collate_fn``, ``max_length``, ``pad_id``, ``features``, ``batch``,
    ``input_ids``, ``attention_mask``) that are never bound under those
    names. Original fields were presumably ``pad_id`` and ``max_length``.
    """

    _snake_case : int
    _snake_case : int = 4096  # no dynamic padding on TPUs
    def __call__( self : Any , lowerCamelCase : str ):
        """Collate one batch of features and shard it across devices via
        ``jax.tree_util.tree_map`` (presumably with ``shard`` — the callable
        argument is the undefined ``_lowerCAmelCase``)."""
        __lowercase = self.collate_fn(_lowerCAmelCase )
        __lowercase = jax.tree_util.tree_map(_lowerCAmelCase , _lowerCAmelCase )
        return batch
    def _snake_case ( self : Dict , lowerCamelCase : Tuple ):
        """Build the model-input dict (ids, mask, start/end/category labels)
        from raw features; pads ids/mask via ``fetch_inputs``."""
        __lowercase , __lowercase = self.fetch_inputs(features["input_ids"] )
        __lowercase = {
            "input_ids": jnp.array(_lowerCAmelCase , dtype=jnp.intaa ),
            "attention_mask": jnp.array(_lowerCAmelCase , dtype=jnp.intaa ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
        }
        return batch
    def _snake_case ( self : Optional[Any] , lowerCamelCase : list ):
        """Pad every sequence and return (input_ids, attention_masks) lists."""
        __lowercase = [self._fetch_inputs(_lowerCAmelCase ) for ids in input_ids]
        return zip(*_lowerCAmelCase )
    def _snake_case ( self : int , lowerCamelCase : list ):
        """Right-pad a single sequence to ``self.max_length`` with ``pad_id``;
        the attention mask is 1 for real tokens and 0 for padding."""
        __lowercase = [1 for _ in range(len(_lowerCAmelCase ) )]
        while len(_lowerCAmelCase ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def snake_case_(dataset, batch_size, seed=None):
    """Yield successive ``batch_size``-sized dict batches from *dataset*.

    Args:
        dataset: Anything supporting ``len``, slicing, and (optionally) a
            ``shuffle(seed=...)`` method (e.g. a HF ``Dataset``).
        batch_size: Number of examples per yielded batch.
        seed: When given, the dataset is shuffled first.

    Yields:
        ``dict`` views of each slice; the trailing partial batch is dropped.

    Fixes vs. original: the three parameters shared one name (a SyntaxError)
    and the body referenced undefined locals.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    # Floor division drops the last incomplete batch (drop_last semantics).
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def snake_case_(state, drp_rng, **model_inputs):
    """One pmapped training step: loss + grads, cross-device mean, update.

    Returns ``(new_state, metrics, new_drp_rng)``. Fixes vs. original: the
    parameters shared one name (a SyntaxError) and every local was bound to a
    throwaway name while being referenced by its real name; names below are
    reconstructed from the call structure.
    """

    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        # train=True enables dropout via the freshly split rng.
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=dropout_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    # Split so the key returned to the caller differs from the one consumed.
    new_drp_rng, dropout_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    # Average the loss and gradients across devices before applying them.
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def snake_case_(state, **model_inputs):
    """One pmapped evaluation step: forward pass + cross-device mean loss.

    Fixes vs. original: duplicate parameter names (a SyntaxError) and locals
    bound to throwaway names while referenced by their real names.
    """
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    # train=False disables dropout; no rng is needed.
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class _A(train_state.TrainState):
    """``TrainState`` extended with the task loss function.

    Fixes vs. original: ``pytree_node=_lowerCAmelCase`` referenced an
    undefined name (NameError at class creation); the callable must be a
    static field (``pytree_node=False``) so jax does not try to flatten it.
    The field is named ``loss_fn`` to match the ``state.loss_fn`` call sites
    and the ``TrainState.create(..., loss_fn=...)`` usage elsewhere in this
    file.
    """

    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class _A :
    """Training driver: builds/restores the train state, runs the epoch loop,
    evaluates, and checkpoints.

    NOTE(review): corrupted by an automated rename — all seven fields share
    one name (only the last survives as a dataclass field), every method
    below has duplicate parameter names (a SyntaxError), and locals are bound
    to ``__lowercase`` while being read by their original names
    (``args``, ``state``, ``metrics``, ...). The field order suggests the
    originals were: args, data_collator, train_step_fn, val_step_fn,
    model_save_fn, logger, scheduler_fn. TODO: confirm upstream.
    """

    _snake_case : Args
    _snake_case : Callable
    _snake_case : Callable
    _snake_case : Callable
    _snake_case : Callable
    _snake_case : wandb
    _snake_case : Callable = None
    def _snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Tuple=None ):
        """Create the replicated ``TrainState`` (optionally resuming from
        ``ckpt_dir``); presumably ``create_state(model, tx, num_train_steps,
        ckpt_dir=None)`` — parameters were renamed identically."""
        __lowercase = model.params
        __lowercase = TrainState.create(
            apply_fn=model.__call__ , params=_lowerCAmelCase , tx=_lowerCAmelCase , loss_fn=_lowerCAmelCase , )
        if ckpt_dir is not None:
            # Resume path: reload params/opt_state/step and rebuild the optimizer.
            __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = restore_checkpoint(_lowerCAmelCase , _lowerCAmelCase )
            __lowercase = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            __lowercase , __lowercase = build_tx(**_lowerCAmelCase )
            __lowercase = train_state.TrainState(
                step=_lowerCAmelCase , apply_fn=model.__call__ , params=_lowerCAmelCase , tx=_lowerCAmelCase , opt_state=_lowerCAmelCase , )
        __lowercase = args
        __lowercase = data_collator
        __lowercase = lr
        __lowercase = params
        # Replicate the state across local devices for pmap.
        __lowercase = jax_utils.replicate(_lowerCAmelCase )
        return state
    def _snake_case ( self : str , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : List[str] ):
        """Epoch loop: batch, train-step, log every ``logging_steps`` and
        checkpoint every ``save_steps``; presumably
        ``train(state, tr_dataset, val_dataset)``."""
        __lowercase = self.args
        __lowercase = len(_lowerCAmelCase ) // args.batch_size
        __lowercase = jax.random.PRNGKey(0 )
        # One dropout rng per device.
        __lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
        for epoch in range(args.max_epochs ):
            __lowercase = jnp.array(0 , dtype=jnp.floataa )
            __lowercase = get_batched_dataset(_lowerCAmelCase , args.batch_size , seed=_lowerCAmelCase )
            __lowercase = 0
            for batch in tqdm(_lowerCAmelCase , total=_lowerCAmelCase , desc=f"""Running EPOCH-{epoch}""" ):
                __lowercase = self.data_collator(_lowerCAmelCase )
                __lowercase , __lowercase , __lowercase = self.train_step_fn(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    __lowercase = jax_utils.unreplicate(state.step )
                    __lowercase = running_loss.item() / i
                    __lowercase = self.scheduler_fn(state_step - 1 )
                    __lowercase = self.evaluate(_lowerCAmelCase , _lowerCAmelCase )
                    __lowercase = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(_lowerCAmelCase ) )
                    self.logger.log(_lowerCAmelCase , commit=_lowerCAmelCase )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"""-e{epoch}-s{i}""" , state=_lowerCAmelCase )
    def _snake_case ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ):
        """Average the validation loss over all full batches; presumably
        ``evaluate(state, dataset)``."""
        __lowercase = get_batched_dataset(_lowerCAmelCase , self.args.batch_size )
        __lowercase = len(_lowerCAmelCase ) // self.args.batch_size
        __lowercase = jnp.array(0 , dtype=jnp.floataa )
        __lowercase = 0
        for batch in tqdm(_lowerCAmelCase , total=_lowerCAmelCase , desc="Evaluating ... " ):
            __lowercase = self.data_collator(_lowerCAmelCase )
            __lowercase = self.val_step_fn(_lowerCAmelCase , **_lowerCAmelCase )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def _snake_case ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] ):
        """Persist model params, optimizer state, args, data collator, and the
        step counter under ``save_dir``; presumably
        ``save_checkpoint(save_dir, state)``."""
        # Un-replicate before serializing so a single copy is written.
        __lowercase = jax_utils.unreplicate(_lowerCAmelCase )
        print(f"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
        self.model_save_fn(_lowerCAmelCase , params=state.params )
        with open(os.path.join(_lowerCAmelCase , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(_lowerCAmelCase , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(_lowerCAmelCase , "data_collator.joblib" ) )
        with open(os.path.join(_lowerCAmelCase , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , _lowerCAmelCase )
        print("DONE" )
def snake_case_(save_dir, state):
    """Load a checkpoint written by ``save_checkpoint`` back into memory.

    Args:
        save_dir: Directory containing the serialized checkpoint files.
        state: Template train state whose pytree structure guides msgpack
            deserialization of ``params`` and ``opt_state``.

    Returns:
        ``(params, opt_state, step, args, data_collator)``.

    Fixes vs. original: the two parameters shared one name (a SyntaxError)
    and every loaded value was bound to a throwaway local.
    """
    print(f"""RESTORING CHECKPOINT FROM {save_dir}""", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def snake_case_(lr, init_lr, warmup_steps, num_train_steps):
    """Build an LR schedule: linear warmup ``init_lr -> lr`` over
    ``warmup_steps``, then linear decay ``lr -> 1e-7`` for the rest.

    Fixes vs. original: the four parameters shared one name (a SyntaxError)
    and the intermediate schedules were bound to throwaway locals.
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1E-7, transition_steps=decay_steps)
    # Switch from warmup to decay exactly at warmup_steps.
    return optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
def snake_case_(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (with a weight-decay mask) and its schedule.

    Returns:
        ``(tx, lr_schedule)`` — the optax transform and the schedule used for
        logging.

    Fixes vs. original: the five parameters shared one name (a SyntaxError),
    and the decay mask was computed from the parameter *values* (``v[-1]``)
    instead of the flattened key paths, which are tuples of name components.
    """

    def weight_decay_mask(params):
        # Decay everything except biases and LayerNorm scale parameters.
        flat_params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat_params}
        return traverse_util.unflatten_dict(mask)

    lr_schedule = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr_schedule, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr_schedule
| 402 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the Data2Vec models: heavy submodules are only
# imported on first attribute access (eagerly under TYPE_CHECKING for IDEs).
#
# Fixes vs. original: the structure dict and the per-backend symbol lists were
# bound to throwaway names so `_import_structure` (used by _LazyModule below)
# was undefined, the TYPE_CHECKING imports used mangled module/class names,
# and the final _LazyModule instance was never installed in sys.modules.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the modeling modules from the lazy structure.
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase__(ProcessorMixin):
    """Processor bundling a Blip image processor with a main tokenizer and a
    separate Q-Former tokenizer (InstructBLIP-style).

    Fixes vs. original: the base class was the undefined ``_lowerCAmelCase``
    (``ProcessorMixin`` is imported above), the three class attributes shared
    one name so ``ProcessorMixin`` could not wire the components, the
    Q-Former tokenizer was bound to a local instead of ``self``, every method
    body referenced undefined ``_lowerCAmelCase`` names, and four methods
    shared the name ``a__`` so only the last survived.
    """

    # ProcessorMixin uses these to save/load the wrapped components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer (handled separately — ProcessorMixin only
        # manages the two attributes listed above).
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize ``text`` with both tokenizers and preprocess ``images``;
        Q-Former ids/mask are stored under ``qformer_``-prefixed keys."""
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Prefix the Q-Former tensors so they do not clash with the main
            # tokenizer's keys.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('''input_ids''')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('''attention_mask''')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the main tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # De-duplicate while preserving order.
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save all components; the Q-Former tokenizer goes to a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, '''qformer_tokenizer''')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load all components, picking the Q-Former tokenizer up from its
        dedicated subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='''qformer_tokenizer''')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 210 |
import os
from collections.abc import Iterator
def snake_case(top_dir="."):
    """Yield ``.py``/``.ipynb`` file paths under *top_dir*, skipping the
    ``scripts`` directory, hidden/private directories, and ``__init__.py``.

    Fixes vs. original: the filtered directory list was assigned to a
    throwaway local instead of ``dir_names[:]`` (so ``os.walk`` never pruned
    anything), and the file-extension test was applied to *top_dir* instead
    of the file name (so nothing was ever yielded).
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Mutate in place so os.walk does not descend into pruned directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def snake_case(i):
    """Return the markdown prefix for nesting depth *i*: a fresh ``##``
    section header at depth 0, otherwise an ``i``-space-indented bullet.

    Fixes vs. original: the body read ``i`` while the parameter had been
    renamed, producing a NameError on every call.
    """
    return f"{i * ' '}*" if i else "\n##"
def snake_case(old_path, new_path):
    """Print markdown headers for every path component of *new_path* that
    differs from *old_path*, and return *new_path*.

    Fixes vs. original: the two parameters shared one name (a SyntaxError),
    the body read undefined ``old_path``/``old_parts``/``new_path`` locals,
    and ``md_prefix`` was called with the wrong argument instead of the
    component depth ``i``.
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        # Only emit a header for components beyond the shared prefix.
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def snake_case(top_dir="."):
    """Print a markdown index (headers + links) of all good file paths under
    *top_dir*.

    Fixes vs. original: all intermediates were bound to one throwaway local
    (so the split, indent, URL and display name clobbered each other), and
    the URL/link text contained "(unknown)" placeholders while the computed
    title-cased filename went unused.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        # Pretty display name: underscores -> spaces, Title Case, no extension.
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
    # Entry point: print the markdown directory index for the current dir.
    # NOTE(review): ``print_directory_md`` is not defined under that name in
    # this file — the definition above was renamed by an automated pass.
    print_directory_md(""".""")
| 80 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowerCAmelCase = random.Random()
def __SCREAMING_SNAKE_CASE(shape, scale=1.0, rng=None, name=None):
    """Build a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    Fixes vs. original: the four parameters shared one name (a SyntaxError)
    and the body read ``rng``/``values``/``shape``/``scale`` locals that were
    never bound.
    """
    if rng is None:
        # NOTE(review): the module-level default rng was renamed by the same
        # automated pass; ``global_rng`` is undefined in this file as-is.
        rng = global_rng
    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class _lowerCAmelCase(unittest.TestCase):
    """Builds Speech2Text feature-extractor configs and synthetic speech
    inputs for the test class below.

    Fixes vs. original: the ``__init__`` parameters all shared one name (a
    SyntaxError), every value was bound to a throwaway local instead of
    ``self`` (leaving the attributes read below undefined), and the two
    helper methods shared one name so only the last survived. The first
    helper is named ``prepare_feat_extract_dict`` to match its call sites in
    the test class; the second is presumably ``prepare_inputs_for_common``
    (the SequenceFeatureExtractionTestMixin hook) — TODO confirm upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive example lengths when sizes must increase.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of synthetic speech inputs, optionally equal-length
        and optionally as numpy arrays."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase (self ) -> str:
_snake_case = SpeechaTextFeatureExtractionTester(self )
def lowercase (self , UpperCAmelCase ) -> Tuple:
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def lowercase (self ) -> Any:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
_snake_case = feature_extractor(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_snake_case = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_snake_case = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test batched
_snake_case = feature_extractor(_lowerCAmelCase , return_tensors="""np""" ).input_features
_snake_case = feature_extractor(_lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case = np.asarray(_lowerCAmelCase )
_snake_case = feature_extractor(_lowerCAmelCase , return_tensors="""np""" ).input_features
_snake_case = feature_extractor(_lowerCAmelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def lowercase (self ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ["""longest""", """max_length""", """do_not_pad"""]
_snake_case = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_snake_case = feature_extractor(
_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase (self ) -> Any:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ["""longest""", """max_length""", """do_not_pad"""]
_snake_case = [None, 16, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
_snake_case = feature_extractor(
_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" , return_attention_mask=_lowerCAmelCase )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(_lowerCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase (self ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
_lowerCAmelCase , padding="""max_length""" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="""np""" , return_attention_mask=_lowerCAmelCase , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase (self ) -> Any:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
_lowerCAmelCase , padding="""longest""" , max_length=4 , truncation=_lowerCAmelCase , return_tensors="""np""" , return_attention_mask=_lowerCAmelCase , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
_lowerCAmelCase , padding="""longest""" , max_length=16 , truncation=_lowerCAmelCase , return_tensors="""np""" , return_attention_mask=_lowerCAmelCase , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
    def lowercase (self ) -> Union[str, Any]:
        """Check ``pad`` dtype behaviour for numpy vs torch return tensors.

        NOTE(review): ``np.floataa`` / ``torch.floataa`` are not real dtype
        names (presumably ``float32`` upstream), and ``py_speech_inputs``,
        ``np_processed`` and ``pt_processed`` are read but never bound under
        those names — looks like mechanically renamed locals; confirm.
        """
        import torch
        _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # One random float64 waveform, plus the same data as a Python list.
        _snake_case = np.random.rand(100 , 32 ).astype(np.floataa )
        _snake_case = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _snake_case = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            _snake_case = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
        """Load the first ``UpperCAmelCase`` audio arrays from the dummy LibriSpeech set.

        NOTE(review): the body reads ``ds``, ``num_samples`` and
        ``speech_samples``, none of which are bound under those names here —
        the locals look mechanically renamed; confirm against the upstream
        ``_load_datasamples`` helper.
        """
        from datasets import load_dataset
        _snake_case = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        _snake_case = ds.sort("""id""" ).select(range(_lowerCAmelCase ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def lowercase (self ) -> Any:
        """Integration check: extracted features match hard-coded reference values.

        Compares the first 30 values of the first frame against a frozen
        slice and checks the full output shape (1, 584, 24).
        """
        # fmt: off — frozen reference slice of the expected first frame.
        _snake_case = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ] )
        # fmt: on
        _snake_case = self._load_datasamples(1 )
        _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # NOTE(review): ``input_features`` is read below but the extractor
        # output is assigned to ``_snake_case`` — obfuscated locals; confirm.
        _snake_case = feature_extractor(_lowerCAmelCase , return_tensors="""pt""" ).input_features
        self.assertEquals(input_features.shape , (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCAmelCase , atol=1e-4 ) )
from math import factorial
def snake_case ( n , k ):
    """Return C(n, k): the number of ways to choose k items out of n.

    Args:
        n: total number of items; must satisfy ``n >= k``.
        k: number of items chosen; must be non-negative.

    Returns:
        The binomial coefficient n! / (k! * (n - k)!).

    Raises:
        ValueError: if ``k < 0`` or ``n < k``.
    """
    # NOTE(review): the original signature declared the same parameter name
    # twice (a SyntaxError) while the body referenced ``n`` and ``k``; the
    # obviously intended parameter names are restored here.
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Demo output. NOTE(review): the original f-strings called an undefined
    # name ``combinations``; the binomial helper above is named
    # ``snake_case``, so the calls are redirected to it.
    print(
        """The number of five-card hands possible from a standard""",
        F'''fifty-two card deck is: {snake_case(52, 5)}\n''',
    )
    print(
        """If a class of 40 students must be arranged into groups of""",
        F'''4 for group projects, there are {snake_case(40, 4)} ways''',
        """to arrange them.\n""",
    )
    print(
        """If 10 teams are competing in a Formula One race, there""",
        F'''are {snake_case(10, 3)} ways that first, second and''',
        """third place can be awarded.""",
    )
| 80 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
    """Builds tiny XLM configs/inputs and checks each XLM model head.

    NOTE(review): throughout this class ``_lowerCAmelCase`` is used as an
    argument value but is never defined in this module — the identifiers look
    mechanically renamed; confirm against the upstream ``XLMModelTester``
    before running.
    """

    def __init__( self : int , snake_case__ : Dict , snake_case__ : int=13 , snake_case__ : Optional[int]=7 , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=False , snake_case__ : Union[str, Any]=False , snake_case__ : Optional[Any]=False , snake_case__ : Any=2 , snake_case__ : Union[str, Any]=99 , snake_case__ : int=0 , snake_case__ : Tuple=32 , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=4 , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=512 , snake_case__ : List[Any]=2 , snake_case__ : Optional[int]=0.0_2 , snake_case__ : List[str]=2 , snake_case__ : Optional[int]=4 , snake_case__ : Dict="last" , snake_case__ : Tuple=True , snake_case__ : int=None , snake_case__ : str=0 , ):
        """Store the (small) model hyper-parameters used by every check below."""
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_lengths
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = gelu_activation
        __lowerCAmelCase = sinusoidal_embeddings
        __lowerCAmelCase = causal
        __lowerCAmelCase = asm
        __lowerCAmelCase = n_langs
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = n_special
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = summary_type
        __lowerCAmelCase = use_proj
        __lowerCAmelCase = scope
        __lowerCAmelCase = bos_token_id

    def UpperCAmelCase__ ( self : Any ):
        """Create random ids/masks/labels plus a config for one forward pass."""
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCAmelCase = None
        if self.use_input_lengths:
            __lowerCAmelCase = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCAmelCase = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Return an XLMConfig built from the stored hyper-parameters."""
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    def UpperCAmelCase__ ( self : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Dict , ):
        """Check the base XLMModel output shape."""
        __lowerCAmelCase = XLMModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase , lengths=_lowerCAmelCase , langs=_lowerCAmelCase )
        __lowerCAmelCase = model(_lowerCAmelCase , langs=_lowerCAmelCase )
        __lowerCAmelCase = model(_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : Any , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , ):
        """Check XLMWithLMHeadModel loss and logits shapes."""
        __lowerCAmelCase = XLMWithLMHeadModel(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self : List[str] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : int , snake_case__ : Tuple , snake_case__ : str , ):
        """Check XLMForQuestionAnsweringSimple start/end logits shapes."""
        __lowerCAmelCase = XLMForQuestionAnsweringSimple(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase )
        __lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
        __lowerCAmelCase = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase__ ( self : Any , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , ):
        """Check XLMForQuestionAnswering (beam-search QA head) output shapes."""
        __lowerCAmelCase = XLMForQuestionAnswering(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase )
        __lowerCAmelCase = model(
            _lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , p_mask=_lowerCAmelCase , )
        __lowerCAmelCase = model(
            _lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , cls_index=_lowerCAmelCase , is_impossible=_lowerCAmelCase , )
        ((__lowerCAmelCase ) , ) = result_with_labels.to_tuple()
        __lowerCAmelCase = model(_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
        ((__lowerCAmelCase ) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Any , ):
        """Check XLMForSequenceClassification loss and logits shapes."""
        __lowerCAmelCase = XLMForSequenceClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase )
        __lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCAmelCase__ ( self : int , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , ):
        """Check XLMForTokenClassification logits shape."""
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = XLMForTokenClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : List[str] , ):
        """Check XLMForMultipleChoice logits shape (inputs tiled per choice)."""
        __lowerCAmelCase = self.num_choices
        __lowerCAmelCase = XLMForMultipleChoice(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = model(
            _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Repack prepare_config_and_inputs() into (config, inputs_dict)."""
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs
        __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common + pipeline test-suite for the XLM model family.

    NOTE(review): the base-class placeholders ``_lowerCAmelCase`` are never
    defined in this module (upstream this mixes ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin) — confirm before running.
    """

    # All XLM heads exercised by the common tests.
    lowercase_ : Dict = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    lowercase_ : Tuple = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    lowercase_ : Optional[Any] = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Tuple ):
        """Skip QA pipeline tests when a slow (non-Fast) tokenizer is used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=False ):
        """Add dummy cls_index/is_impossible labels for the beam-search QA head."""
        __lowerCAmelCase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
        return inputs_dict

    def UpperCAmelCase__ ( self : Dict ):
        """Build the model tester and the config tester."""
        __lowerCAmelCase = XLMModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , emb_dim=37 )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : List[Any] ):
        """Exercise the base XLMModel check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Exercise the LM-head check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Exercise the simple-QA check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Exercise the beam-search QA check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : List[Any] ):
        """Exercise the sequence-classification check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Exercise the token-classification check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : Any ):
        """Exercise the multiple-choice check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*_lowerCAmelCase )

    def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any=False , snake_case__ : List[str]=1 ):
        """Check per-step attention shapes produced during generation."""
        self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
        self.assertListEqual(
            [isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(_lowerCAmelCase ) )
        self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(_lowerCAmelCase ):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_lowerCAmelCase ) )

    def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int]=False , snake_case__ : Dict=1 ):
        """Check per-step hidden-state shapes produced during generation."""
        self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
        self.assertListEqual(
            [isinstance(_lowerCAmelCase , _lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_lowerCAmelCase ) , )
        self.assertEqual(len(_lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(_lowerCAmelCase ):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_lowerCAmelCase ) , )
        pass

    @slow
    def UpperCAmelCase__ ( self : Tuple ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = XLMModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
@require_torch
class a ( unittest.TestCase ):
    """Slow integration test: greedy generation with a pretrained XLM LM head."""

    @slow
    def UpperCAmelCase__ ( self : List[str] ):
        """Generate from the prompt 'the president' and compare exact token ids."""
        __lowerCAmelCase = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
        model.to(_lowerCAmelCase )
        __lowerCAmelCase = torch.tensor([[14, 447]] , dtype=torch.long , device=_lowerCAmelCase )  # the president
        # Frozen expected output: the model degenerately repeats the prompt.
        __lowerCAmelCase = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __lowerCAmelCase = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _lowerCAmelCase )
| 611 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def snake_case ( ):
    """Build a random 3-sum problem instance.

    Returns:
        A tuple ``(arr, r)`` where ``arr`` is a list of 10 random integers in
        [-1000, 1000] and ``r`` is a random target in [-5000, 5000].
    """
    # NOTE(review): the original assigned both values to the same throwaway
    # local and then returned the undefined names ``arr``/``r`` (NameError);
    # the intended locals are restored here.
    arr = [randint(-1_000 , 1_000 ) for _ in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
__UpperCamelCase : Any = make_dataset()
def snake_case ( arr , target ):
    """Brute-force 3-sum: find three elements of ``arr`` summing to ``target``.

    Tries every ordered 3-permutation (O(n^3)).

    Returns:
        The matching triplet sorted ascending, or ``(0, 0, 0)`` when no
        triplet sums to ``target``.
    """
    # NOTE(review): the original declared the same parameter name twice
    # (a SyntaxError); distinct parameter names are restored here.
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def snake_case ( arr , target ):
    """Two-pointer 3-sum: find three elements of ``arr`` summing to ``target``.

    Sorts ``arr`` **in place** and scans with two pointers (O(n^2)).

    Returns:
        The matching triplet in ascending order, or ``(0, 0, 0)`` when no
        triplet sums to ``target``.
    """
    # NOTE(review): the original declared the same parameter name twice
    # (a SyntaxError) while the body read ``arr``/``target``; the intended
    # parameter names are restored here.
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def snake_case ( ):
    """Time the naive vs two-pointer 3-sum solvers with ``timeit.repeat``.

    Returns:
        ``(t_naive, t_optimized)``: the best of 5 repeats of 10_000 runs each.

    NOTE(review): the setup string imports ``dataset``, ``triplet_sum1`` and
    ``triplet_sum2`` from ``__main__``, and ``repeat`` is called with the
    undefined name ``lowerCamelCase`` — no objects with those names exist in
    this module as written; confirm the intended names before running.
    """
    __lowercase = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    __lowercase = """
triplet_sum1(*dataset)
"""
    __lowercase = """
triplet_sum2(*dataset)
"""
    __lowercase = repeat(setup=lowerCamelCase , stmt=lowerCamelCase , repeat=5 , number=10_000 )
    __lowercase = repeat(setup=lowerCamelCase , stmt=lowerCamelCase , repeat=5 , number=10_000 )
    return (min(lowerCamelCase ), min(lowerCamelCase ))
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    # NOTE(review): ``solution_times`` and ``times`` are not defined under
    # those names in this module as written (the timing helper above is
    # named ``snake_case`` and assignments target ``__UpperCamelCase``) —
    # confirm the intended names before running this demo.
    __UpperCamelCase : Tuple = solution_times()
    print(F'''The time for naive implementation is {times[0]}.''')
    print(F'''The time for optimized implementation is {times[1]}.''')
| 80 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class _a ( _lowerCAmelCase ):
    """Audio-classification task template: maps dataset columns to audio/labels.

    NOTE(review): ``_lowerCAmelCase`` in the decorator and base list is never
    defined in this module (upstream this is ``@dataclass(frozen=True)`` on a
    ``TaskTemplate`` subclass) — confirm before use.
    """

    # Task name; kept in asdict() output even when left at the default.
    SCREAMING_SNAKE_CASE_ : str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    # Expected input schema: a single Audio column.
    SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""audio""": Audio()} )
    # Expected label schema; the concrete ClassLabel is filled in below.
    SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
    SCREAMING_SNAKE_CASE_ : str = "audio"
    SCREAMING_SNAKE_CASE_ : str = "labels"

    def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Return a copy of this template with the label schema taken from ``features``.

        Raises ValueError when the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] ,_lowerCAmelCase ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        # Work on a deep copy so the (frozen) template instance is not mutated.
        _snake_case = copy.deepcopy(self )
        _snake_case = self.label_schema.copy()
        _snake_case = features[self.label_column]
        _snake_case = label_schema
        return task_template

    @property
    def _lowercase ( self ) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 185 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
class __UpperCamelCase ( _lowerCAmelCase ):
    """RAG retriever for distributed training: retrieval runs only on the main
    worker; queries are gathered to rank 0 and results scattered back over a
    dedicated gloo process group.

    NOTE(review): as written the module is heavily name-mangled — assignments
    target ``__lowercase`` while later statements read other names (e.g.
    ``retrieved_doc_embeds``); confirm against the upstream
    ``RagPyTorchDistributedRetriever`` before relying on this.
    """

    def __init__( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=None ) -> int:
        """Forward tokenizers/index to the base retriever; process group starts unset."""
        super().__init__(
            _lowerCAmelCase , question_encoder_tokenizer=_lowerCAmelCase , generator_tokenizer=_lowerCAmelCase , index=_lowerCAmelCase , init_retrieval=_lowerCAmelCase , )
        __lowercase = None

    def _a ( self : int , _lowerCAmelCase : int ) -> Any:
        """Initialise retrieval; in distributed mode build a gloo group and load the index on rank 0 only."""
        logger.info("""initializing retrieval""" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""" )
            # needs to be set manually
            __lowercase = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            __lowercase = str(distributed_port + 1 )
            __lowercase = dist.new_group(ranks=_lowerCAmelCase , backend="""gloo""" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""" )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def _a ( self : Tuple ) -> List[str]:
        """True when this process is rank 0 of the retrieval group."""
        return dist.get_rank(group=self.process_group ) == 0

    def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=torch.floataa ) -> Tuple:
        """Receive this rank's shard of a tensor scattered from rank 0."""
        __lowercase = torch.empty(_lowerCAmelCase , dtype=_lowerCAmelCase )
        dist.scatter(_lowerCAmelCase , src=0 , scatter_list=_lowerCAmelCase , group=self.process_group )
        return target_tensor

    def _a ( self : Union[str, Any] ) -> Any:
        """Guess the network interface name (first one starting with 'e')."""
        __lowercase = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        __lowercase = next((addr for addr in addrs if addr.startswith("""e""" )) , _lowerCAmelCase )
        return ifname

    def _a ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]:
        """Retrieve documents: gather queries to rank 0, retrieve there, scatter results back.

        NOTE(review): the annotated return type is a 2-tuple but three values
        are returned (embeddings, doc ids, doc dicts) — confirm upstream.
        """
        if not dist.is_initialized():
            __lowercase , __lowercase = self._main_retrieve(_lowerCAmelCase , _lowerCAmelCase )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCAmelCase )
        # distributed training
        __lowercase = dist.get_world_size(group=self.process_group )
        # gather logic
        __lowercase = None
        if self._is_main():
            __lowercase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_lowerCAmelCase )]
        dist.gather(torch.tensor(_lowerCAmelCase ) , dst=0 , gather_list=_lowerCAmelCase , group=self.process_group )
        # scatter logic
        __lowercase = question_hidden_states.shape[0]
        __lowercase = []
        __lowercase = []
        if self._is_main():
            assert len(_lowerCAmelCase ) == world_size
            __lowercase , __lowercase = self._main_retrieve(torch.cat(_lowerCAmelCase ).numpy() , _lowerCAmelCase )
            __lowercase , __lowercase = torch.tensor(_lowerCAmelCase ), torch.tensor(_lowerCAmelCase )
            __lowercase = self._chunk_tensor(_lowerCAmelCase , _lowerCAmelCase )
            __lowercase = self._chunk_tensor(_lowerCAmelCase , _lowerCAmelCase )
        __lowercase = self._scattered(_lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
        __lowercase = self._scattered(_lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_lowerCAmelCase )
| 80 | 0 |
"""simple docstring"""
import numpy as np
class __a :
    """A* grid cell: a position plus parent link and g/h/f path costs."""

    def __init__( self ):
        # NOTE(review): the original assigned all of these to a single
        # throwaway local, leaving the attributes (read by the A* routine
        # below) unset; the intended attribute writes are restored here.
        self.position = (0, 0)  # (x, y) square on the grid
        self.parent = None      # predecessor cell on the best known path
        self.g = 0              # cost accumulated from the start cell
        self.h = 0              # heuristic estimate of cost to the goal
        self.f = 0              # total priority: g + h

    def __eq__( self , a__ ):
        # Two cells denote the same node iff they occupy the same square.
        # NOTE(review): the original compared against the undefined name
        # ``cell``; the comparison now uses the actual parameter.
        return self.position == a__.position

    def snake_case_ ( self ):
        """Print this cell's position (debug helper)."""
        print(self.position )
class __a :
    """Rectangular grid world used by the A* demo."""

    def __init__( self , a__=(5, 5) ):
        # NOTE(review): the original assigned these values to throwaway
        # locals, leaving self.w / self.world_x_limit / self.world_y_limit
        # (read by the methods below) unset; the attribute writes are
        # restored here.
        self.w = np.zeros(a__ )          # grid array (also used for path display)
        self.world_x_limit = a__[0]
        self.world_y_limit = a__[1]

    def snake_case_ ( self ):
        """Print the underlying grid array (debug helper)."""
        print(self.w )

    def get_neigbours( self , a__ ):
        """Return the in-bounds 8-connected neighbour cells of cell ``a__``.

        NOTE(review): the original defined this under the same name as the
        print helper above (shadowing it) while the A* routine in this
        module calls ``world.get_neigbours(...)``; that name is restored.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = a__.position[0]
        current_y = a__.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # NOTE(review): ``Cell`` is not defined under that name in
                # this module as written (the cell class above is also named
                # ``__a``) — the class names need untangling; TODO confirm.
                c = Cell()
                c.position = (x, y)
                c.parent = a__
                neighbours.append(c )
        return neighbours
def SCREAMING_SNAKE_CASE_ ( world , start , goal ):
    """A* search from ``start`` to ``goal`` on ``world``.

    Args:
        world: object exposing ``get_neigbours(cell)`` returning neighbour
            cells with ``position``/``parent``/``g``/``h``/``f`` attributes.
        start: start cell.
        goal: goal cell (compared by position).

    Returns:
        List of positions from start to goal inclusive (start -> goal order).
    """
    # NOTE(review): the original declared three identically named parameters
    # (a SyntaxError), collapsed every local into one name — which zeroed the
    # heuristic to (ya - ya)**2 + (xa - xa)**2 — and used a bare ``continue``
    # inside the membership-check loops (a no-op skip). Reconstructed below.
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Expand the queued node with the lowest f = g + h.
        min_f = int(np.argmin([n.f for n in _open] ) )
        current = _open.pop(min_f )
        _closed.append(current )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # Skip nodes that were already expanded.
            if any(c == n for c in _closed ):
                continue
            n.g = current.g + 1
            xa , ya = n.position
            x_goal , y_goal = goal.position
            # Squared-Euclidean heuristic (as in the original layout).
            n.h = (y_goal - ya) ** 2 + (x_goal - xa) ** 2
            n.f = n.h + n.g
            # Skip when a strictly better copy is already queued.
            if any(c == n and c.f < n.f for c in _open ):
                continue
            _open.append(n )
    # Walk parent links back from the last expanded node (the goal on
    # success) and reverse to get start -> goal order.
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # NOTE(review): ``Gridworld``, ``Cell``, ``astar``, ``start``, ``goal``,
    # ``world`` and ``s`` are not defined under those names in this module as
    # written (both classes are named ``__a``, the search function
    # ``SCREAMING_SNAKE_CASE_`` and assignments target ``A_``) — confirm the
    # intended public names before running this demo.
    A_ : Any =Gridworld()
    # Start position and goal
    A_ : Any =Cell()
    A_ : List[str] =(0, 0)
    A_ : Optional[int] =Cell()
    A_ : str =(4, 4)
    print(f'path from {start.position} to {goal.position}')
    A_ : str =astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        A_ : Any =1
    print(world.w)
| 650 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :List[Any] = 1
@register_to_config
def __init__( self : str , _lowerCAmelCase : int = 1000 , _lowerCAmelCase : Optional[Union[np.ndarray, List[float]]] = None ) -> Optional[int]:
"""simple docstring"""
self.set_timesteps(_lowerCAmelCase )
# standard deviation of the initial noise distribution
__lowercase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowercase = 4
# running values
__lowercase = []
def _a ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, torch.device] = None ) -> int:
"""simple docstring"""
__lowercase = num_inference_steps
__lowercase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowercase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowercase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowercase = torch.sin(steps * math.pi / 2 ) ** 2
__lowercase = (1.0 - self.betas**2) ** 0.5
__lowercase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowercase = timesteps.to(_lowerCAmelCase )
__lowercase = []
def _a ( self : List[str] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowercase = (self.timesteps == timestep).nonzero().item()
__lowercase = timestep_index + 1
__lowercase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_lowerCAmelCase )
if len(self.ets ) == 1:
__lowercase = self.ets[-1]
elif len(self.ets ) == 2:
__lowercase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowercase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowercase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowercase = self._get_prev_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def _a ( self : Union[str, Any] , _lowerCAmelCase : torch.FloatTensor , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : str ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _a ( self : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.alphas[timestep_index]
__lowercase = self.betas[timestep_index]
__lowercase = self.alphas[prev_timestep_index]
__lowercase = self.betas[prev_timestep_index]
__lowercase = (sample - sigma * ets) / max(_lowerCAmelCase , 1e-8 )
__lowercase = next_alpha * pred + ets * next_sigma
return prev_sample
    def __len__( self : Optional[Any] ) -> int:
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 80 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( PipelineKarrasSchedulerTesterMixin ,PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """Fast, CPU-sized tests for ``StableUnCLIPPipeline``.

    NOTE(review): this class was passed through an automated renamer — the mixin bases and many
    keyword values were the undefined placeholder ``_lowerCAmelCase`` and every class attribute
    shared one name. Bases are restored from this file's own imports; attribute names follow the
    Pipeline*TesterMixin contract. Boolean argument values marked below should be confirmed
    against the upstream test module.
    """

    # Attributes consumed by the tester mixins.
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the smallest set of components the pipeline accepts."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,  # NOTE(review): restored from a mangled placeholder — confirm
            clip_sample_range=5.0,
            beta_schedule='squaredcos_cap_v2',
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type='projection',
            # class embeddings = image embeddings concatenated with their noised counterpart
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,  # NOTE(review): restored from a mangled placeholder — confirm
            use_linear_projection=True,  # NOTE(review): restored from a mangled placeholder — confirm
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear',
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type='v_prediction',
            set_alpha_to_one=False,  # NOTE(review): restored from a mangled placeholder — confirm
            steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline (mps needs a CPU-seeded generator)."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Exact-output comparison is only reliable on CPU.
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests( unittest.TestCase ):
    """GPU integration tests for ``StableUnCLIPPipeline``.

    NOTE(review): renamed from a mangled duplicate class name (it shadowed the fast-test class
    above) and the ``tearDown``/``test_*`` method names were restored — unittest never ran the
    mangled ones. ``torch.floataa`` (not a real torch attribute) restored to ``torch.float16``.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')  # prompt string kept as-is

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle',
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 652 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar("""T""")
class Node(Generic[T]):
    """One singly-linked-list node holding a single stack item.

    NOTE(review): renamed from a mangled duplicate class name — the stack's ``push`` below
    constructs ``Node(...)``, so this is the name the file requires. The attribute
    assignments had been reduced to dead local bindings and are restored.
    """

    def __init__(self, data: T) -> None:
        self.data = data  # payload; read by __str__ and the stack's __iter__
        self.next: Node[T] | None = None  # link to the node beneath this one

    def __str__(self) -> str:
        return f"{self.data}"
class __UpperCamelCase ( Generic[T] ):
    """LIFO stack backed by a singly linked list of ``Node`` objects.

    NOTE(review): every method had been renamed to the same identifier ``_a`` (only the last
    survived, breaking the in-class ``self.is_empty()`` calls) and the ``self.top`` writes had
    become dead local bindings; names and assignments restored.
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # Walk from the top of the stack to the bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place a new item on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return (without removing) the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item; the detached nodes are left to the garbage collector."""
        self.top = None
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
| 80 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds tiny MaskFormer configs/inputs and shared checks for the tests below.

    NOTE(review): renamed from a mangled class name — the test class below instantiates
    ``MaskFormerModelTester(self)``, so this is the name the file requires. All ``__init__``
    parameters shared one mangled identifier (a SyntaxError) and the ``self.*`` assignments
    had become dead local bindings; names restored from the body's own references.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Random pixel values/masks plus binary mask and class labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """A minimal Swin-backbone + DETR-decoder MaskFormer configuration."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
# NOTE(review): this class went through an automated renamer — the mixin bases appear as the
# undefined placeholder `_lowerCAmelCase`, every class attribute is bound to the same name `A__`,
# all test methods share the name `__UpperCAmelCase` (so only the last definition survives), and
# many argument values are placeholder identifiers. The code is kept byte-identical here and
# only annotated; restoring it requires the upstream MaskFormer test module.
@require_torch
class UpperCAmelCase ( _lowerCAmelCase,_lowerCAmelCase,unittest.TestCase ):
    A__ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    A__ : Optional[int] = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    A__ : Any = False
    A__ : str = False
    A__ : Any = False
    A__ : Optional[int] = False
    # presumably setUp: builds the shared model tester and config tester — confirm name upstream
    def __UpperCAmelCase ( self : Dict ):
        """Instantiate the shared MaskFormerModelTester and ConfigTester."""
        _snake_case = MaskFormerModelTester(self )
        _snake_case = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : Optional[int] ):
        """Forward-pass check for the base MaskFormerModel."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
    def __UpperCAmelCase ( self : List[str] ):
        """Forward-pass check for the instance-segmentation head model."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )
    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def __UpperCAmelCase ( self : Tuple ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def __UpperCAmelCase ( self : List[str] ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """simple docstring"""
        pass
    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def __UpperCAmelCase ( self : List[str] ):
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """simple docstring"""
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """simple docstring"""
        pass
    def __UpperCAmelCase ( self : Tuple ):
        """Check every model's forward signature starts with `pixel_values`."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(_lowerCAmelCase )
            _snake_case = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case = [*signature.parameters.keys()]
            _snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    @slow
    def __UpperCAmelCase ( self : Optional[int] ):
        """Smoke-test loading a released checkpoint."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            _snake_case = MaskFormerModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
    def __UpperCAmelCase ( self : Union[str, Any] ):
        """Labeled forward pass must produce a loss."""
        _snake_case = (self.model_tester.min_size,) * 2
        _snake_case = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=_lowerCAmelCase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=_lowerCAmelCase ).long(),
        }
        _snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
        _snake_case = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
    def __UpperCAmelCase ( self : List[Any] ):
        """Hidden-state output check for the base model."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
    def __UpperCAmelCase ( self : Optional[int] ):
        """Attention outputs must be returned when requested."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
            _snake_case = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )
    def __UpperCAmelCase ( self : Optional[int] ):
        """Training step: loss must backpropagate without error."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        _snake_case = self.all_model_classes[1]
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()
        _snake_case = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
        loss.backward()
    def __UpperCAmelCase ( self : List[Any] ):
        """Gradients of retained hidden states/attentions must be populated after backward."""
        # only MaskFormerForInstanceSegmentation has the loss
        _snake_case = self.all_model_classes[1]
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = True
        _snake_case = True
        _snake_case = model_class(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.train()
        _snake_case = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
        _snake_case = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _snake_case = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        _snake_case = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _snake_case = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
snake_case = 1E-4
def prepare_img():
    """Load the fixed COCO sample image used by the vision integration tests.

    NOTE(review): renamed — the integration class below calls ``prepare_img()``; the body's
    assignment had become a dead local binding while returning an undefined name.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
# NOTE(review): like the class above, this integration-test class was mechanically renamed —
# all methods share the name `__UpperCAmelCase` (only the last survives), devices/tolerances
# appear as the placeholder `_lowerCAmelCase`, and numeric literals carry digit-separator
# mangling (e.g. `3_2`). Kept byte-identical and annotated only.
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase ):
    @cached_property
    def __UpperCAmelCase ( self : Optional[int] ):
        """Default image processor for the released small Swin COCO checkpoint."""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )
    def __UpperCAmelCase ( self : Dict ):
        """Base-model inference: checks hidden-state slices against recorded values."""
        _snake_case = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_lowerCAmelCase )
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
        _snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            _snake_case = model(**_lowerCAmelCase )
        _snake_case = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        _snake_case = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        _snake_case = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(_lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def __UpperCAmelCase ( self : List[str] ):
        """Instance-segmentation head inference against the Swin-small COCO checkpoint."""
        _snake_case = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(_lowerCAmelCase )
            .eval()
        )
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
        _snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            _snake_case = model(**_lowerCAmelCase )
        # masks_queries_logits
        _snake_case = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _snake_case = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        _snake_case = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        # class_queries_logits
        _snake_case = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case = torch.tensor(
            [
                [1.6_512E00, -5.2_572E00, -3.3_519E00],
                [3.6_169E-02, -5.9_025E00, -2.9_313E00],
                [1.0_766E-04, -7.7_630E00, -5.1_263E00],
            ] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def __UpperCAmelCase ( self : Any ):
        """Instance-segmentation head inference against the ResNet-101 COCO-stuff checkpoint."""
        _snake_case = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(_lowerCAmelCase )
            .eval()
        )
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(_lowerCAmelCase , return_tensors='''pt''' ).to(_lowerCAmelCase )
        _snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(_lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            _snake_case = model(**_lowerCAmelCase )
        # masks_queries_logits
        _snake_case = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        _snake_case = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        _snake_case = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
        # class_queries_logits
        _snake_case = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        _snake_case = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
    def __UpperCAmelCase ( self : str ):
        """Segmentation-map batching path: labeled forward must produce a loss."""
        _snake_case = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(_lowerCAmelCase )
            .eval()
        )
        _snake_case = self.default_image_processor
        _snake_case = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        _snake_case = inputs['''pixel_values'''].to(_lowerCAmelCase )
        _snake_case = [el.to(_lowerCAmelCase ) for el in inputs['''mask_labels''']]
        _snake_case = [el.to(_lowerCAmelCase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            _snake_case = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 103 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : Union[str, Any] = False
class __UpperCamelCase ( unittest.TestCase ):
    # Intentionally empty placeholder test case — no fast tests are defined in this module.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):
    """Nightly GPU tests covering the dual-guided, text-to-image and image-variation
    entry points of ``VersatileDiffusionPipeline``.

    NOTE(review): class/method names, device targets and dtypes restored from mangled
    placeholders (``_a``, ``_lowerCAmelCase``, the non-existent ``torch.floataa``) —
    the unittest runner never discovered the mangled test methods.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        """Round-trip save/load must leave the dual-guided forward pass unchanged."""
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="""first prompt""" ,
            image=prompt_image ,
            text_to_image_strength=0.75 ,
            generator=generator ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type="""numpy""" ,
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="""first prompt""" ,
            image=prompt_image ,
            text_to_image_strength=0.75 ,
            generator=generator ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type="""numpy""" ,
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        """Run all three entry points and compare 3x3 output slices to recorded values."""
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = """cyberpunk 2077"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt ,
            image=init_image ,
            text_to_image_strength=0.75 ,
            generator=generator ,
            guidance_scale=7.5 ,
            num_inference_steps=50 ,
            output_type="""numpy""" ,
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image , generator=generator , output_type="""numpy""" ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 80 | 0 |
import operator as op

# NOTE(review): every constant below had been collapsed onto one mangled name (each assignment
# silently overwrote the previous one, leaving a single surviving binding); canonical names
# restored from the accelerate constants module.

# Checkpoint / state file names.
SCALER_NAME = """scaler.pt"""
MODEL_NAME = """pytorch_model"""
RNG_STATE_NAME = """random_states"""
OPTIMIZER_NAME = """optimizer"""
SCHEDULER_NAME = """scheduler"""
WEIGHTS_NAME = """pytorch_model.bin"""
WEIGHTS_INDEX_NAME = """pytorch_model.bin.index.json"""
SAFE_WEIGHTS_NAME = """model.safetensors"""
SAFE_WEIGHTS_INDEX_NAME = """model.safetensors.index.json"""

# SageMaker launch requirements.
SAGEMAKER_PYTORCH_VERSION = """1.10.2"""
SAGEMAKER_PYTHON_VERSION = """py38"""
SAGEMAKER_TRANSFORMERS_VERSION = """4.17.0"""
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]

# FSDP option spellings accepted from configs / CLI.
FSDP_SHARDING_STRATEGY = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
FSDP_AUTO_WRAP_POLICY = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
FSDP_BACKWARD_PREFETCH = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
FSDP_STATE_DICT_TYPE = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
FSDP_PYTORCH_VERSION = """2.0.1"""

DEEPSPEED_MULTINODE_LAUNCHERS = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
TORCH_DYNAMO_MODES = ["""default""", """reduce-overhead""", """max-autotune"""]

# Maps a comparison-operator string to the callable implementing it.
STR_OPERATION_TO_FUNC = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    """nnodes""",
    """nproc_per_node""",
    """rdzv_backend""",
    """rdzv_endpoint""",
    """rdzv_id""",
    """rdzv_conf""",
    """standalone""",
    """max_restarts""",
    """monitor_interval""",
    """start_method""",
    """role""",
    """module""",
    """m""",
    """no_python""",
    """run_path""",
    """log_dir""",
    """r""",
    """redirects""",
    """t""",
    """tee""",
    """node_rank""",
    """master_addr""",
    """master_port""",
]

CUDA_DISTRIBUTED_TYPES = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
# NOTE(review): the original name of this last constant was lost in the renaming; it looks like
# the distributed types supported on XPU — confirm the canonical name against upstream.
XPU_DISTRIBUTED_TYPES = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
from __future__ import annotations
from collections.abc import MutableSequence
class __UpperCamelCase:
    """A univariate polynomial with real coefficients.

    ``coefficients[i]`` holds the coefficient of ``x**i``; the sequence must
    contain exactly ``degree + 1`` entries.  A module-level alias
    ``Polynomial`` is defined below the class and is used for
    self-references inside method bodies (the dunder-prefixed class name
    would otherwise be subject to private name mangling).
    """

    def __init__(self, degree: int, coefficients) -> None:
        # BUGFIX: the obfuscated source declared both parameters with the
        # same name (a SyntaxError) and referenced undefined locals.
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )
        # Defensive copy so later mutation of the caller's list cannot leak in.
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        """Return the sum; the result degree is the larger of the two."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        coefficients = polynomial_a.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        # a - b == a + (-1) * b
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        """Return the product; result degree is the sum of both degrees."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at ``substitution``."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are omitted."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                # BUGFIX: the obfuscated source formatted the undefined name
                # `_lowerCAmelCase` here instead of the exponent `i`.
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self):
        """Return the first derivative as a new polynomial."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            # BUGFIX: the obfuscated source dropped the subscript target here.
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        """Return the antiderivative with integration constant ``constant``."""
        coefficients = [0] * (self.degree + 2)
        # BUGFIX: the obfuscated source dropped both subscript targets below.
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    # Backward-compatible alias: in the obfuscated original, evaluate,
    # derivative and integral were all named `_a`, so only the last
    # definition (the integral) was reachable under that name.
    _a = integral

    def __eq__(self, polynomial_a: object) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)


# Readable module-level alias used by the method bodies above.
Polynomial = __UpperCamelCase
| 80 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# NOTE(review): every assignment below rebinds the same name
# `_UpperCAmelCase`, so only the final list survives; later code references
# `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`,
# `_IMAGE_CLASS_CHECKPOINT` and `_IMAGE_CLASS_EXPECTED_OUTPUT`, which are
# never defined here — restore the distinct names from upstream.
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase : Any = """ResNetConfig"""
# Base docstring
_UpperCAmelCase : Union[str, Any] = """microsoft/resnet-50"""
_UpperCAmelCase : List[Any] = [1, 20_48, 7, 7]
# Image classification docstring
_UpperCAmelCase : str = """microsoft/resnet-50"""
_UpperCAmelCase : List[str] = """tiger cat"""
_UpperCAmelCase : Union[str, Any] = [
    """microsoft/resnet-50""",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowerCAmelCase ( nn.Module ):
    # ResNet building block: convolution -> batch norm -> activation.
    # NOTE(review): identifiers were mangled — the parameters all share the
    # name `UpperCAmelCase` and the bodies reference `_lowerCAmelCase`,
    # `kernel_size` and `activation`, which are not bound in this scope;
    # restore the real names from the upstream modeling file.
    def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" ) -> str:
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial size for stride 1.
        lowerCamelCase__ : Any = nn.Convad(
            _lowerCAmelCase , _lowerCAmelCase , kernel_size=_lowerCAmelCase , stride=_lowerCAmelCase , padding=kernel_size // 2 , bias=_lowerCAmelCase )
        lowerCamelCase__ : int = nn.BatchNormad(_lowerCAmelCase )
        lowerCamelCase__ : Optional[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
    def A_ ( self : List[Any] , UpperCAmelCase : Tensor ) -> Tensor:
        # forward: conv -> norm -> activation
        lowerCamelCase__ : Tuple = self.convolution(_lowerCAmelCase )
        lowerCamelCase__ : str = self.normalization(_lowerCAmelCase )
        lowerCamelCase__ : Optional[Any] = self.activation(_lowerCAmelCase )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    # ResNet stem ("embeddings"): 7x7 stride-2 conv + 3x3 stride-2 max pool.
    # NOTE(review): identifiers mangled by obfuscation; assignment targets
    # (`lowerCamelCase__`) and references (`_lowerCAmelCase`) do not match.
    def __init__( self : List[str] , UpperCAmelCase : ResNetConfig ) -> str:
        super().__init__()
        lowerCamelCase__ : Tuple = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        lowerCamelCase__ : str = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
        lowerCamelCase__ : Optional[Any] = config.num_channels
    def A_ ( self : Tuple , UpperCAmelCase : Tensor ) -> Tensor:
        # Reject inputs whose channel count disagrees with the configuration.
        lowerCamelCase__ : Dict = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        lowerCamelCase__ : str = self.embedder(_lowerCAmelCase )
        lowerCamelCase__ : str = self.pooler(_lowerCAmelCase )
        return embedding
class lowerCAmelCase ( nn.Module ):
    # 1x1 projection shortcut (conv + batch norm) used when the residual
    # branch changes channel count or spatial resolution.
    def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 ) -> Tuple:
        super().__init__()
        lowerCamelCase__ : Any = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , kernel_size=1 , stride=_lowerCAmelCase , bias=_lowerCAmelCase )
        lowerCamelCase__ : List[Any] = nn.BatchNormad(_lowerCAmelCase )
    def A_ ( self : int , UpperCAmelCase : Tensor ) -> Tensor:
        lowerCamelCase__ : List[Any] = self.convolution(_lowerCAmelCase )
        lowerCamelCase__ : int = self.normalization(_lowerCAmelCase )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    # Classic two-conv residual layer with an optional projection shortcut.
    def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" ) -> Optional[int]:
        super().__init__()
        # A projection shortcut is needed whenever the block changes shape.
        lowerCamelCase__ : Tuple = in_channels != out_channels or stride != 1
        lowerCamelCase__ : Optional[Any] = (
            ResNetShortCut(_lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
        )
        lowerCamelCase__ : Union[str, Any] = nn.Sequential(
            ResNetConvLayer(_lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase ) , ResNetConvLayer(_lowerCAmelCase , _lowerCAmelCase , activation=_lowerCAmelCase ) , )
        lowerCamelCase__ : Optional[Any] = ACTaFN[activation]
    def A_ ( self : List[str] , UpperCAmelCase : Dict ) -> Tuple:
        # residual add: out = activation(layer(x) + shortcut(x))
        lowerCamelCase__ : Optional[int] = hidden_state
        lowerCamelCase__ : Union[str, Any] = self.layer(_lowerCAmelCase )
        lowerCamelCase__ : List[Any] = self.shortcut(_lowerCAmelCase )
        hidden_state += residual
        lowerCamelCase__ : Dict = self.activation(_lowerCAmelCase )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    # Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    # optional projection shortcut; `reduction` shrinks the inner width.
    def __init__( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "relu" , UpperCAmelCase : int = 4 ) -> Any:
        super().__init__()
        lowerCamelCase__ : int = in_channels != out_channels or stride != 1
        lowerCamelCase__ : List[str] = out_channels // reduction
        lowerCamelCase__ : List[str] = (
            ResNetShortCut(_lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
        )
        lowerCamelCase__ : str = nn.Sequential(
            ResNetConvLayer(_lowerCAmelCase , _lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(_lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase ) , ResNetConvLayer(_lowerCAmelCase , _lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase ) , )
        lowerCamelCase__ : Optional[Any] = ACTaFN[activation]
    def A_ ( self : Optional[int] , UpperCAmelCase : List[str] ) -> Tuple:
        # residual add: out = activation(layer(x) + shortcut(x))
        lowerCamelCase__ : str = hidden_state
        lowerCamelCase__ : Union[str, Any] = self.layer(_lowerCAmelCase )
        lowerCamelCase__ : str = self.shortcut(_lowerCAmelCase )
        hidden_state += residual
        lowerCamelCase__ : Optional[Any] = self.activation(_lowerCAmelCase )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    # One ResNet stage: `depth` residual layers; the layer type (basic vs
    # bottleneck) comes from `config.layer_type`.
    def __init__( self : Optional[int] , UpperCAmelCase : ResNetConfig , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , ) -> List[str]:
        super().__init__()
        lowerCamelCase__ : List[str] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        lowerCamelCase__ : List[str] = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(_lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , activation=config.hidden_act ) , *[layer(_lowerCAmelCase , _lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def A_ ( self : Optional[int] , UpperCAmelCase : Tensor ) -> Tensor:
        lowerCamelCase__ : Optional[int] = input
        for layer in self.layers:
            lowerCamelCase__ : Tuple = layer(_lowerCAmelCase )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    # Stack of ResNet stages; optionally accumulates the hidden state after
    # each stage when `output_hidden_states` is set.
    def __init__( self : List[str] , UpperCAmelCase : ResNetConfig ) -> List[str]:
        super().__init__()
        lowerCamelCase__ : Dict = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                _lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        lowerCamelCase__ : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(_lowerCAmelCase , config.depths[1:] ):
            self.stages.append(ResNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase ) )
    def A_ ( self : Any , UpperCAmelCase : Tensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
        # Hidden states are collected *before* each stage and once at the end.
        lowerCamelCase__ : Union[str, Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowerCamelCase__ : List[Any] = hidden_states + (hidden_state,)
            lowerCamelCase__ : Dict = stage_module(_lowerCAmelCase )
        if output_hidden_states:
            lowerCamelCase__ : Dict = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase , )
class lowerCAmelCase ( _lowerCAmelCase ):
    # PreTrainedModel glue for ResNet: config class, weight init, gradient
    # checkpointing toggle.
    # NOTE(review): the four class attributes below all rebind
    # `UpperCAmelCase__`; upstream these are config_class / base_model_prefix /
    # main_input_name / supports_gradient_checkpointing — confirm.
    UpperCAmelCase__ = ResNetConfig
    UpperCAmelCase__ = 'resnet'
    UpperCAmelCase__ = 'pixel_values'
    UpperCAmelCase__ = True
    def A_ ( self : List[str] , UpperCAmelCase : Any ) -> Optional[int]:
        # Kaiming-normal init for convolutions; unit weight / zero bias for norms.
        if isinstance(_lowerCAmelCase , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(_lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def A_ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[int]=False ) -> int:
        # Enables/disables gradient checkpointing on encoder submodules.
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            lowerCamelCase__ : Dict = value
_UpperCAmelCase : str = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : List[str] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare ResNet model outputting raw features without any specific head on top.""", _lowerCAmelCase, )
class lowerCAmelCase ( _lowerCAmelCase ):
    # Bare ResNet: embeddings -> encoder -> adaptive average pool.
    def __init__( self : List[Any] , UpperCAmelCase : Union[str, Any] ) -> str:
        super().__init__(_lowerCAmelCase )
        lowerCamelCase__ : str = config
        lowerCamelCase__ : List[Any] = ResNetEmbeddings(_lowerCAmelCase )
        lowerCamelCase__ : List[str] = ResNetEncoder(_lowerCAmelCase )
        lowerCamelCase__ : List[Any] = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A_ ( self : Dict , UpperCAmelCase : Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        # Resolve output flags from the config when not given explicitly.
        lowerCamelCase__ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCamelCase__ : Union[str, Any] = self.embedder(_lowerCAmelCase )
        lowerCamelCase__ : Optional[int] = self.encoder(
            _lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase )
        lowerCamelCase__ : Any = encoder_outputs[0]
        lowerCamelCase__ : str = self.pooler(_lowerCAmelCase )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    """, _lowerCAmelCase, )
class lowerCAmelCase ( _lowerCAmelCase ):
    # ResNet with a linear classification head on the pooled features.
    def __init__( self : Dict , UpperCAmelCase : int ) -> Optional[int]:
        super().__init__(_lowerCAmelCase )
        lowerCamelCase__ : Optional[Any] = config.num_labels
        lowerCamelCase__ : Optional[int] = ResNetModel(_lowerCAmelCase )
        # classification head
        lowerCamelCase__ : Optional[Any] = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A_ ( self : int , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        lowerCamelCase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCamelCase__ : Union[str, Any] = self.resnet(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase )
        lowerCamelCase__ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
        lowerCamelCase__ : Dict = self.classifier(_lowerCAmelCase )
        lowerCamelCase__ : List[Any] = None
        if labels is not None:
            # Infer the problem type (regression / single- / multi-label) once.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowerCamelCase__ : Any = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowerCamelCase__ : Dict = 'single_label_classification'
                else:
                    lowerCamelCase__ : Dict = 'multi_label_classification'
            if self.config.problem_type == "regression":
                lowerCamelCase__ : List[Any] = MSELoss()
                if self.num_labels == 1:
                    lowerCamelCase__ : int = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowerCamelCase__ : List[Any] = loss_fct(_lowerCAmelCase , _lowerCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                lowerCamelCase__ : Dict = CrossEntropyLoss()
                lowerCamelCase__ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowerCamelCase__ : Union[str, Any] = BCEWithLogitsLoss()
                lowerCamelCase__ : int = loss_fct(_lowerCAmelCase , _lowerCAmelCase )
        if not return_dict:
            lowerCamelCase__ : Union[str, Any] = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    """\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    """, _lowerCAmelCase, )
class lowerCAmelCase ( _lowerCAmelCase, _lowerCAmelCase ):
    # Backbone wrapper exposing intermediate feature maps for detection heads.
    # NOTE(review): the two base classes were obfuscated to `_lowerCAmelCase`;
    # upstream these are ResNetPreTrainedModel and BackboneMixin — confirm.
    def __init__( self : Tuple , UpperCAmelCase : Tuple ) -> str:
        super().__init__(_lowerCAmelCase )
        super()._init_backbone(_lowerCAmelCase )
        lowerCamelCase__ : Optional[int] = [config.embedding_size] + config.hidden_sizes
        lowerCamelCase__ : Optional[int] = ResNetEmbeddings(_lowerCAmelCase )
        lowerCamelCase__ : int = ResNetEncoder(_lowerCAmelCase )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @replace_return_docstrings(output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
    def A_ ( self : int , UpperCAmelCase : Tensor , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None ) -> BackboneOutput:
        lowerCamelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCamelCase__ : List[str] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCamelCase__ : Tuple = self.embedder(_lowerCAmelCase )
        lowerCamelCase__ : List[str] = self.encoder(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase )
        lowerCamelCase__ : Dict = outputs.hidden_states
        lowerCamelCase__ : Any = ()
        # Collect only the stages requested via `out_features`.
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            lowerCamelCase__ : str = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=_lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_lowerCAmelCase , )
| 295 |
def counting_sort(collection):
    """Stable counting sort over a list of integers (values may be negative).

    Returns a new sorted list; the input is left untouched.  Runs in
    O(n + k) time, where k is the value range of the input.
    """
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of ``string`` by counting-sorting their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


# Backward-compatible alias: in the obfuscated original both functions were
# named `snake_case`, so that name resolved to the string variant.
snake_case = counting_sort_string


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffold for the ConvNext subpackage.
# NOTE(review): the obfuscation collapsed every `_import_structure[...]`
# bucket onto the single name `SCREAMING_SNAKE_CASE_`, and the final
# `_LazyModule(...)` call reads `_import_structure`, which is never defined
# here — restore the upstream structure before using this module.
SCREAMING_SNAKE_CASE_:Optional[Any] = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_:Optional[Any] = ["""ConvNextFeatureExtractor"""]
    SCREAMING_SNAKE_CASE_:List[str] = ["""ConvNextImageProcessor"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_:List[Any] = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE_:Union[str, Any] = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys
    SCREAMING_SNAKE_CASE_:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 662 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
    # Test fixture that builds tiny ConvNext configs and random inputs for the
    # unit tests below (upstream: ConvNextModelTester).
    # NOTE(review): identifiers were mangled by obfuscation — `__init__`
    # declares every parameter as `_lowerCAmelCase` (duplicate-argument
    # SyntaxError) and the bodies assign to `__lowercase` while reading names
    # (parent, batch_size, model, result, ...) that are never bound; restore
    # the real names from the upstream test file before relying on this class.
    def __init__( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : str=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Optional[int]=[10, 20, 30, 40] , _lowerCAmelCase : Optional[Any]=[2, 2, 3, 2] , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : str=["stage2", "stage3", "stage4"] , _lowerCAmelCase : Dict=[2, 3, 4] , _lowerCAmelCase : Tuple=None , ) -> Any:
        """simple docstring"""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = image_size
        __lowercase = num_channels
        __lowercase = num_stages
        __lowercase = hidden_sizes
        __lowercase = depths
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = num_labels
        __lowercase = initializer_range
        __lowercase = out_features
        __lowercase = out_indices
        __lowercase = scope
    def _a ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        # Random pixel values (+ labels when use_labels) and a fresh config.
        __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size] , self.num_labels )
        __lowercase = self.get_config()
        return config, pixel_values, labels
    def _a ( self : List[str] ) -> Any:
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def _a ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextModel(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _a ( self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextForImageClassification(_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _a ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = ConvNextBackbone(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        __lowercase = None
        __lowercase = ConvNextBackbone(config=_lowerCAmelCase )
        model.to(_lowerCAmelCase )
        model.eval()
        __lowercase = model(_lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def _a ( self : List[str] ) -> List[str]:
        """simple docstring"""
        # Adapts (config, pixel_values, labels) into the common-test format.
        __lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase = config_and_inputs
        __lowercase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    # Unit-test suite wiring the ConvNext models into the shared tester mixins
    # (upstream: ConvNextModelTest).
    # NOTE(review): the first two base classes were obfuscated to the
    # undefined name `_lowerCAmelCase`; upstream these are ModelTesterMixin
    # and PipelineTesterMixin. The class attributes below all rebind
    # `__snake_case` — upstream: all_model_classes, pipeline_model_mapping,
    # fx_compatible, test_pruning, test_resize_embeddings,
    # test_head_masking, has_attentions — confirm against upstream.
    __snake_case :Optional[Any] = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    __snake_case :List[str] = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    __snake_case :str = True
    __snake_case :Any = False
    __snake_case :Any = False
    __snake_case :Any = False
    __snake_case :int = False
    def _a ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        __lowercase = ConvNextModelTester(self )
        __lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
    def _a ( self : Optional[Any] ) -> int:
        """simple docstring"""
        # Runs the full battery of ConfigTester checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _a ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def _a ( self : List[Any] ) -> Any:
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def _a ( self : Dict ) -> int:
        """simple docstring"""
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def _a ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        pass
    def _a ( self : Tuple ) -> Tuple:
        """simple docstring"""
        # Checks that every model's forward signature starts with pixel_values.
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = model_class(_lowerCAmelCase )
            __lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase = [*signature.parameters.keys()]
            __lowercase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    def _a ( self : Any ) -> List[str]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )
    def _a ( self : Any ) -> Optional[int]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
    def _a ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        def check_hidden_states_output(_lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
            __lowercase = model_class(_lowerCAmelCase )
            model.to(_lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                __lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
            __lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __lowercase = self.model_tester.num_stages
            self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowercase = True
            check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    def _a ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
    @slow
    def _a ( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        # Smoke-test that the first published checkpoint can be loaded.
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = ConvNextModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )
def snake_case ( ):
    """Load the fixture COCO image used by the integration tests below."""
    # Fix: the scrambled original bound the image to ``__lowercase`` and then
    # returned the undefined name ``image``.
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: ConvNext classification logits on a fixture image.

    NOTE(review): both methods were renamed to ``_a`` by an automated rewrite
    (the second shadows the first in the class namespace) and several
    references (``self.default_image_processor``, ``model``, ``outputs``,
    ``image_processor``, the device passed to ``.to``) use placeholders that
    are never bound here — confirm against the original test file.
    """
    @cached_property
    def _a ( self : Tuple ) -> Any:
        """Image processor for the checkpoint, or None without vision extras."""
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def _a ( self : str ) -> Optional[Any]:
        """Run one forward pass and compare a logits slice to reference values."""
        __lowercase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_lowerCAmelCase )
        __lowercase = self.default_image_processor
        __lowercase = prepare_img()
        __lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            __lowercase = model(**_lowerCAmelCase )
        # verify the logits
        __lowercase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
        # Reference slice of the first three ImageNet logits for this checkpoint.
        __lowercase = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
    """Backbone-specific test suite for ConvNext.

    NOTE(review): the second base class and the attribute names were mangled
    (``__snake_case`` repeated); presumably these were the backbone-tester
    mixin's ``all_model_classes`` / ``config_class`` / ``has_attentions``
    attributes — confirm against the original test.
    """
    # Backbone classes under test (empty without torch).
    __snake_case :Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
    # Config class used to build test models.
    __snake_case :str = ConvNextConfig
    # Flag consumed by the shared tester mixin.
    __snake_case :Optional[Any] = False
    def _a ( self : Optional[int] ) -> List[Any]:
        """Create the shared model tester fixture."""
        __lowercase = ConvNextModelTester(self )
| 80 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
    """Processor tests for MGP-STR: tokenizer + ViT image-processor round trips.

    NOTE(review): local names were mangled to ``__lowercase`` and arguments to
    ``_lowerCAmelCase`` by an automated rewrite, so several references below
    (``self.tmpdirname``, ``self.vocab_file``, ``self.image_processor_file``,
    the tokenizer/processor/decoded locals) point at names that are never
    bound in this view — restore from the original test before relying on it.
    """
    # Image-processor class under test (None when vision extras are absent).
    _snake_case : str = ViTImageProcessor if is_vision_available() else None
    @property
    def _snake_case ( self : Optional[Any] ):
        """Return the image-processor kwargs from the tester fixture (not created here)."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _snake_case ( self : Optional[int] ):
        """setUp: write a character vocab and an image-processor config to a temp dir."""
        # (channels, height, width) of the model input — appears unused below.
        __lowercase = (3, 32, 128)
        __lowercase = tempfile.mkdtemp()
        # fmt: off
        __lowercase = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        # Token -> id mapping serialized as the tokenizer vocab file.
        __lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
        # Minimal ViT image-processor config for a 32x128 input.
        __lowercase = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        __lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(_lowerCAmelCase , _lowerCAmelCase )
    def _snake_case ( self : List[Any] , **lowerCamelCase : Union[str, Any] ):
        """Load a tokenizer from the temp dir, forwarding extra kwargs."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
    def _snake_case ( self : Union[str, Any] , **lowerCamelCase : Dict ):
        """Load an image processor from the temp dir, forwarding extra kwargs."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
    def _snake_case ( self : List[str] ):
        """tearDown: remove the temp dir and everything written into it."""
        shutil.rmtree(self.tmpdirname )
    def _snake_case ( self : Dict ):
        """Build one random RGB PIL image as processor input."""
        # ``np.uinta`` looks like a mangled ``np.uint8`` — confirm.
        __lowercase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
        # moveaxis converts channels-first (3, H, W) to channels-last for PIL.
        __lowercase = Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) )
        return image_input
    def _snake_case ( self : Optional[int] ):
        """Save a processor and reload it; components must round-trip intact."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_image_processor()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , _lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
    def _snake_case ( self : Optional[int] ):
        """Reload with extra kwargs; the overrides must be reflected in components."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_image_processor()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        __lowercase = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
        __lowercase = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , _lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
    def _snake_case ( self : Tuple ):
        """Processor(images=...) must match the bare image processor's output."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        __lowercase = self.prepare_image_inputs()
        __lowercase = image_processor(_lowerCAmelCase , return_tensors="np" )
        __lowercase = processor(images=_lowerCAmelCase , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def _snake_case ( self : List[str] ):
        """Processor(text=...) must match the bare tokenizer's encoding."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        __lowercase = "test"
        __lowercase = processor(text=_lowerCAmelCase )
        __lowercase = tokenizer(_lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _snake_case ( self : Optional[Any] ):
        """Text + image call yields pixel_values and labels; empty call raises."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        __lowercase = "test"
        __lowercase = self.prepare_image_inputs()
        __lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
        # test if it raises when no input is passed
        with pytest.raises(_lowerCAmelCase ):
            processor()
    def _snake_case ( self : int ):
        """char_decode must equal batch_decode with spaces stripped."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        __lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        __lowercase = processor.char_decode(_lowerCAmelCase )
        __lowercase = tokenizer.batch_decode(_lowerCAmelCase )
        __lowercase = [seq.replace(" " , "" ) for seq in decoded_tok]
        self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
    def _snake_case ( self : Optional[Any] ):
        """With text=None, output keys must match the processor's model_input_names."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        __lowercase = None
        __lowercase = self.prepare_image_inputs()
        __lowercase = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def _snake_case ( self : Optional[Any] ):
        """batch_decode over char/bpe/wp logits returns the five expected keys."""
        __lowercase = self.get_image_processor()
        __lowercase = self.get_tokenizer()
        __lowercase = MgpstrProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
        # Logit tensors sized for the char (38), BPE (50257) and WP (30522) vocabs.
        __lowercase = torch.randn(1 , 27 , 38 )
        __lowercase = torch.randn(1 , 27 , 50_257 )
        __lowercase = torch.randn(1 , 27 , 30_522 )
        __lowercase = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 402 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
# Module logger.
logger = logging.get_logger(__name__)

# Fix: in the scrambled original every mapping below was assigned to the same
# name (``__UpperCamelCase``), so each assignment clobbered the previous one and
# the ``_LazyAutoMapping`` calls referred to undefined canonical names.  The
# canonical names are restored here.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("""albert""", """FlaxAlbertModel"""),
        ("""bart""", """FlaxBartModel"""),
        ("""beit""", """FlaxBeitModel"""),
        ("""bert""", """FlaxBertModel"""),
        ("""big_bird""", """FlaxBigBirdModel"""),
        ("""blenderbot""", """FlaxBlenderbotModel"""),
        ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
        ("""clip""", """FlaxCLIPModel"""),
        ("""distilbert""", """FlaxDistilBertModel"""),
        ("""electra""", """FlaxElectraModel"""),
        ("""gpt-sw3""", """FlaxGPT2Model"""),
        ("""gpt2""", """FlaxGPT2Model"""),
        ("""gpt_neo""", """FlaxGPTNeoModel"""),
        ("""gptj""", """FlaxGPTJModel"""),
        ("""longt5""", """FlaxLongT5Model"""),
        ("""marian""", """FlaxMarianModel"""),
        ("""mbart""", """FlaxMBartModel"""),
        ("""mt5""", """FlaxMT5Model"""),
        ("""opt""", """FlaxOPTModel"""),
        ("""pegasus""", """FlaxPegasusModel"""),
        ("""regnet""", """FlaxRegNetModel"""),
        ("""resnet""", """FlaxResNetModel"""),
        ("""roberta""", """FlaxRobertaModel"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
        ("""roformer""", """FlaxRoFormerModel"""),
        ("""t5""", """FlaxT5Model"""),
        ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
        ("""vit""", """FlaxViTModel"""),
        ("""wav2vec2""", """FlaxWav2Vec2Model"""),
        ("""whisper""", """FlaxWhisperModel"""),
        ("""xglm""", """FlaxXGLMModel"""),
        ("""xlm-roberta""", """FlaxXLMRobertaModel"""),
    ]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("""albert""", """FlaxAlbertForPreTraining"""),
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""bert""", """FlaxBertForPreTraining"""),
        ("""big_bird""", """FlaxBigBirdForPreTraining"""),
        ("""electra""", """FlaxElectraForPreTraining"""),
        ("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""mt5""", """FlaxMT5ForConditionalGeneration"""),
        ("""roberta""", """FlaxRobertaForMaskedLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
        ("""roformer""", """FlaxRoFormerForMaskedLM"""),
        ("""t5""", """FlaxT5ForConditionalGeneration"""),
        ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
        ("""whisper""", """FlaxWhisperForConditionalGeneration"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
    ]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("""albert""", """FlaxAlbertForMaskedLM"""),
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""bert""", """FlaxBertForMaskedLM"""),
        ("""big_bird""", """FlaxBigBirdForMaskedLM"""),
        ("""distilbert""", """FlaxDistilBertForMaskedLM"""),
        ("""electra""", """FlaxElectraForMaskedLM"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""roberta""", """FlaxRobertaForMaskedLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
        ("""roformer""", """FlaxRoFormerForMaskedLM"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
    ]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
        ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
        ("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
        ("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
        ("""marian""", """FlaxMarianMTModel"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""mt5""", """FlaxMT5ForConditionalGeneration"""),
        ("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
        ("""t5""", """FlaxT5ForConditionalGeneration"""),
    ]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("""beit""", """FlaxBeitForImageClassification"""),
        ("""regnet""", """FlaxRegNetForImageClassification"""),
        ("""resnet""", """FlaxResNetForImageClassification"""),
        ("""vit""", """FlaxViTForImageClassification"""),
    ]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
    ]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("""bart""", """FlaxBartForCausalLM"""),
        ("""bert""", """FlaxBertForCausalLM"""),
        ("""big_bird""", """FlaxBigBirdForCausalLM"""),
        ("""electra""", """FlaxElectraForCausalLM"""),
        ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
        ("""gpt2""", """FlaxGPT2LMHeadModel"""),
        ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
        ("""gptj""", """FlaxGPTJForCausalLM"""),
        ("""opt""", """FlaxOPTForCausalLM"""),
        ("""roberta""", """FlaxRobertaForCausalLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
        ("""xglm""", """FlaxXGLMForCausalLM"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
    ]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("""albert""", """FlaxAlbertForSequenceClassification"""),
        ("""bart""", """FlaxBartForSequenceClassification"""),
        ("""bert""", """FlaxBertForSequenceClassification"""),
        ("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
        ("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
        ("""electra""", """FlaxElectraForSequenceClassification"""),
        ("""mbart""", """FlaxMBartForSequenceClassification"""),
        ("""roberta""", """FlaxRobertaForSequenceClassification"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
        ("""roformer""", """FlaxRoFormerForSequenceClassification"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
    ]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("""albert""", """FlaxAlbertForQuestionAnswering"""),
        ("""bart""", """FlaxBartForQuestionAnswering"""),
        ("""bert""", """FlaxBertForQuestionAnswering"""),
        ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
        ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
        ("""electra""", """FlaxElectraForQuestionAnswering"""),
        ("""mbart""", """FlaxMBartForQuestionAnswering"""),
        ("""roberta""", """FlaxRobertaForQuestionAnswering"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
        ("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
    ]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("""albert""", """FlaxAlbertForTokenClassification"""),
        ("""bert""", """FlaxBertForTokenClassification"""),
        ("""big_bird""", """FlaxBigBirdForTokenClassification"""),
        ("""distilbert""", """FlaxDistilBertForTokenClassification"""),
        ("""electra""", """FlaxElectraForTokenClassification"""),
        ("""roberta""", """FlaxRobertaForTokenClassification"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
        ("""roformer""", """FlaxRoFormerForTokenClassification"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
    ]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("""albert""", """FlaxAlbertForMultipleChoice"""),
        ("""bert""", """FlaxBertForMultipleChoice"""),
        ("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
        ("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
        ("""electra""", """FlaxElectraForMultipleChoice"""),
        ("""roberta""", """FlaxRobertaForMultipleChoice"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
        ("""roformer""", """FlaxRoFormerForMultipleChoice"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
    ]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("""bert""", """FlaxBertForNextSentencePrediction"""),
    ]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
        ("""whisper""", """FlaxWhisperForConditionalGeneration"""),
    ]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("""whisper""", """FlaxWhisperForAudioClassification"""),
    ]
)

# Lazy config-class -> model-class mappings consumed by the auto classes below.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Fix: in the scrambled original every class below was named ``__UpperCamelCase``
# while the ``auto_class_update`` calls referenced the canonical ``FlaxAutoModel*``
# names (NameError); the class names are restored, and the mapping attribute is
# renamed to ``_model_mapping``, the name ``_BaseAutoModelClass`` consumes.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 80 | 0 |
"""Sanity-check repository file paths: flag uppercase, spaces, hyphens, and top-level files."""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Fix: the scrambled original bound every result to ``_lowercase`` while the
# checks below read the descriptive names, raising NameError at import time.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Paths containing uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print("""\n""".join(upper_files) + """\n""")

# Paths containing spaces.
space_files = [file for file in filepaths if """ """ in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print("""\n""".join(space_files) + """\n""")

# Paths containing hyphens.
hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print("""\n""".join(hyphen_files) + """\n""")

# Files sitting at the repository root rather than in a directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print("""\n""".join(nodir_files) + """\n""")

# Non-zero exit when any offending file exists (count doubles as exit code).
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 210 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Fix: the scrambled original bound the structure dict to ``__UpperCamelCase``
# and then passed the undefined name ``_import_structure`` to ``_LazyModule``.
_import_structure = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
__UpperCamelCase : int = _import_structure  # preserved scrambled alias
if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Standard transformers lazy-module pattern: replace this module with a
    # proxy so the tokenizer is only imported on first attribute access.
    __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = __UpperCamelCase
| 80 | 0 |
from math import pi


def lowerCAmelCase_( angle , radius ):
    """Return the length of a circular arc.

    Args:
        angle: central angle of the arc, in degrees.
        radius: radius of the circle.

    Returns:
        Arc length, i.e. the fraction ``angle / 360`` of the circumference.
    """
    # Fix: the scrambled original declared two parameters with the same name
    # (a SyntaxError) and read the then-undefined ``radius``/``angle``.
    return 2 * pi * radius * (angle / 3_6_0)


if __name__ == "__main__":
    # Fix: the scrambled original called the undefined name ``arc_length``.
    print(lowerCAmelCase_(90, 10))
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
    """Text-guided inpainting pipeline: CLIPSeg segments the region named by
    ``text``, and the resulting mask drives a Stable Diffusion inpainting pass.

    NOTE(review): every parameter of ``__init__`` and ``__call__`` was renamed
    to the same placeholder (``lowerCamelCase`` — duplicate parameter names are
    a SyntaxError) and locals to ``__snake_case``; the intended names can be
    read off the ``register_modules``/``inpainting_pipeline`` keyword calls
    below, but must be restored from the original file before use.
    """
    def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
        super().__init__()
        # Legacy scheduler configs used steps_offset=0; warn and patch to 1.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            __snake_case : Tuple = (
                F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __snake_case : Any = dict(scheduler.config )
            __snake_case : List[Any] = 1
            __snake_case : Tuple = FrozenDict(lowerCamelCase )
        # Likewise force skip_prk_steps=True for PNDM-style schedulers.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            __snake_case : List[str] = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __snake_case : List[str] = dict(scheduler.config )
            __snake_case : List[str] = True
            __snake_case : Any = FrozenDict(lowerCamelCase )
        if safety_checker is None:
            logger.warning(
                F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        # Register all sub-models so save/load and device moves cover them.
        self.register_modules(
            segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
    def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
        """Enable sliced attention; 'auto' halves the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case : Any = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase )
    def __snake_case ( self : List[Any] ) -> Any:
        """Disable attention slicing (delegates with a None slice size)."""
        self.enable_attention_slicing(lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> str:
        """Offload sub-models to CPU via accelerate to reduce GPU memory."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        __snake_case : Optional[int] = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCamelCase , lowerCamelCase )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self : int ) -> Any:
        """Device the UNet actually executes on (accounts for accelerate hooks)."""
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCamelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
        """Segment the region described by ``text`` and inpaint it with ``prompt``."""
        # CLIPSeg forward pass produces per-pixel logits for the text query.
        __snake_case : Tuple = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        __snake_case : str = self.segmentation_model(**lowerCamelCase )
        # Sigmoid -> [0, 1] mask, converted to a PIL image at the input size.
        __snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        __snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        __snake_case : Tuple = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
| 81 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the scrambled original bound both values to ``_snake_case``, so the
# logger was immediately clobbered by the archive map.
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
_snake_case : Optional[int] = TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP  # preserved scrambled alias
class a (_lowerCAmelCase ):
    """Configuration for a Transformer-XL (``transfo-xl``) model.

    Stores the hyper-parameters used to instantiate the model.  Attribute
    names follow the original Transformer-XL codebase (``d_model``,
    ``n_head``, ...); ``attribute_map`` exposes the library-standard aliases.

    NOTE(review): in the original block every ``__init__`` parameter was
    named ``lowerCamelCase`` (duplicate argument names — a SyntaxError)
    while the body read ``vocab_size``, ``d_model`` etc., and the property
    getter's name did not match the ``@max_position_embeddings.setter``
    decorator (NameError at class creation).  The real names are restored
    from the names the body itself uses; the defaults were left untouched.
    """

    model_type = "transfo-xl"
    # `mems` are recurrent state returned by the forward pass, not logits;
    # inference utilities must skip them when comparing outputs.
    keys_to_ignore_at_inference = ["mems"]
    # Library-standard attribute names -> Transformer-XL native names.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # copied below, so the shared default is never mutated
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to the base config."""
        self.vocab_size = vocab_size
        # Copy the cutoff list so callers (and the default) are never aliased.
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """Transformer-XL has no fixed sequence-length limit; always -1."""
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """Setting a length limit is meaningless for this architecture."""
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 81 |
class a :
    """Disjoint-set (union-find) structure over weighted sets.

    Each index starts as its own set whose weight is given by
    ``set_counts``; ``max_set`` always holds the weight of the heaviest set
    seen so far.  Uses union by rank with path compression.

    NOTE(review): the original block had duplicate ``lowerCamelCase``
    parameter names (a SyntaxError), both methods named ``__snake_case``
    (the second definition clobbered the first), and a body that called
    ``self.get_parent`` / read locals that were never bound.  The names are
    restored from the references the body itself makes.
    """

    def __init__(self, set_counts: list) -> None:
        """``set_counts[i]`` is the initial weight of singleton set ``i``."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        # Every element is initially its own root.
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing ``src`` and ``dst``.

        Returns False when they already share a root, True otherwise.
        Updates ``max_set`` with the merged set's weight.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach src's tree under dst's root and move its weight over.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            # Equal ranks: the surviving root's rank grows by one.
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of ``disj_set``, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 81 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class a (_lowerCAmelCase ):
    """Unit tests for CMStochasticIterativeScheduler (consistency models).

    NOTE(review): identifiers in this block look machine-mangled (every
    helper is ``__snake_case``, every argument ``lowerCamelCase``, and the
    bodies read names that are never bound); the comments below describe
    the intent that the statement sequences show.
    """
    # Scheduler class(es) under test and the default number of timesteps.
    __UpperCAmelCase : Optional[int] = (CMStochasticIterativeScheduler,)
    __UpperCAmelCase : List[Any] = 10
    def __snake_case ( self : Any , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
        # Default scheduler config, overlaid with any keyword overrides.
        __snake_case : Union[str, Any] = {
            "num_train_timesteps": 201,
            "sigma_min": 0.0_02,
            "sigma_max": 80.0,
        }
        config.update(**lowerCamelCase )
        return config
    def __snake_case ( self : Optional[int] ) -> List[str]:
        # Two consecutive scheduler steps must preserve the sample shape.
        __snake_case : Optional[int] = 10
        __snake_case : Any = self.get_scheduler_config()
        __snake_case : List[str] = self.scheduler_classes[0](**lowerCamelCase )
        scheduler.set_timesteps(lowerCamelCase )
        __snake_case : List[Any] = scheduler.timesteps[0]
        __snake_case : List[Any] = scheduler.timesteps[1]
        __snake_case : int = self.dummy_sample
        __snake_case : Optional[int] = 0.1 * sample
        __snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        __snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
    def __snake_case ( self : Dict ) -> str:
        # The scheduler must accept a range of training-timestep counts.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCamelCase )
    def __snake_case ( self : Optional[int] ) -> Dict:
        # Both clip_denoised settings must round-trip through the config.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=lowerCamelCase )
    def __snake_case ( self : Dict ) -> Tuple:
        # Full denoising loop; final tensor statistics checked vs references.
        __snake_case : Tuple = self.scheduler_classes[0]
        __snake_case : Union[str, Any] = self.get_scheduler_config()
        __snake_case : Any = scheduler_class(**lowerCamelCase )
        __snake_case : Union[str, Any] = 1
        scheduler.set_timesteps(lowerCamelCase )
        __snake_case : Union[str, Any] = scheduler.timesteps
        __snake_case : Optional[Any] = torch.manual_seed(0 )
        __snake_case : Tuple = self.dummy_model()
        __snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(lowerCamelCase ):
            # 1. scale model input
            __snake_case : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
            # 2. predict noise residual
            __snake_case : Union[str, Any] = model(lowerCamelCase , lowerCamelCase )
            # 3. predict previous sample x_t-1
            __snake_case : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
            __snake_case : Tuple = pred_prev_sample
        __snake_case : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
        __snake_case : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
        assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
        assert abs(result_mean.item() - 0.25_10 ) < 1E-3
    def __snake_case ( self : str ) -> Optional[Any]:
        # Same loop, driven by an explicit custom timestep schedule.
        __snake_case : Dict = self.scheduler_classes[0]
        __snake_case : List[Any] = self.get_scheduler_config()
        __snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase )
        __snake_case : Tuple = [106, 0]
        scheduler.set_timesteps(timesteps=lowerCamelCase )
        __snake_case : Union[str, Any] = scheduler.timesteps
        __snake_case : List[str] = torch.manual_seed(0 )
        __snake_case : Any = self.dummy_model()
        __snake_case : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __snake_case : Tuple = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
            # 2. predict noise residual
            __snake_case : List[str] = model(lowerCamelCase , lowerCamelCase )
            # 3. predict previous sample x_t-1
            __snake_case : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase ).prev_sample
            __snake_case : List[Any] = pred_prev_sample
        __snake_case : Any = torch.sum(torch.abs(lowerCamelCase ) )
        __snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
        assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
        assert abs(result_mean.item() - 0.45_27 ) < 1E-3
    def __snake_case ( self : List[str] ) -> List[Any]:
        # Non-descending custom timestep lists must be rejected.
        __snake_case : Optional[int] = self.scheduler_classes[0]
        __snake_case : str = self.get_scheduler_config()
        __snake_case : int = scheduler_class(**lowerCamelCase )
        __snake_case : Any = [39, 30, 12, 15, 0]
        with self.assertRaises(lowerCamelCase , msg="`timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=lowerCamelCase )
    def __snake_case ( self : Tuple ) -> Union[str, Any]:
        # Passing both num_inference_steps and timesteps is an error.
        __snake_case : Optional[int] = self.scheduler_classes[0]
        __snake_case : int = self.get_scheduler_config()
        __snake_case : Any = scheduler_class(**lowerCamelCase )
        __snake_case : Tuple = [39, 30, 12, 1, 0]
        __snake_case : Union[str, Any] = len(lowerCamelCase )
        with self.assertRaises(lowerCamelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=lowerCamelCase , timesteps=lowerCamelCase )
    def __snake_case ( self : List[Any] ) -> Optional[int]:
        # Timesteps at/after num_train_timesteps must be rejected.
        __snake_case : str = self.scheduler_classes[0]
        __snake_case : Dict = self.get_scheduler_config()
        __snake_case : Optional[int] = scheduler_class(**lowerCamelCase )
        __snake_case : int = [scheduler.config.num_train_timesteps]
        # NOTE(review): the msg below was presumably meant to be an f-string
        # (note the unexpanded placeholder and stray closing brace); left
        # byte-identical here.
        with self.assertRaises(
            lowerCamelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=lowerCamelCase )
| 81 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a (unittest.TestCase ):
    """Helper that fabricates image-processor configs and expected output
    sizes for the ConditionalDetr image-processing tests below.

    NOTE(review): identifiers look machine-mangled; bodies read names
    (``parent``, ``size``, ``image``, ``w``, ``h``, ...) that are never
    bound under these names.  Also note the annotated tuple-unpack lines
    (``__snake_case , __snake_case : Dict = ...``), which are not valid
    Python syntax; all code left byte-identical.
    """
    def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : int=7 , lowerCamelCase : str=3 , lowerCamelCase : Optional[int]=30 , lowerCamelCase : Dict=400 , lowerCamelCase : str=True , lowerCamelCase : str=None , lowerCamelCase : Any=True , lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase : List[Any]=[0.5, 0.5, 0.5] , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=1 / 255 , lowerCamelCase : Any=True , ) -> str:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __snake_case : Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        __snake_case : Optional[Any] = parent
        __snake_case : List[Any] = batch_size
        __snake_case : Optional[int] = num_channels
        __snake_case : str = min_resolution
        __snake_case : int = max_resolution
        __snake_case : int = do_resize
        __snake_case : Tuple = size
        __snake_case : Any = do_normalize
        __snake_case : int = image_mean
        __snake_case : Tuple = image_std
        __snake_case : Dict = do_rescale
        __snake_case : Optional[Any] = rescale_factor
        __snake_case : str = do_pad
    def __snake_case ( self : Any ) -> int:
        # Serialize the tester's settings into an image-processor kwargs dict.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def __snake_case ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=False ) -> List[str]:
        # Compute the (height, width) the processor is expected to produce:
        # shortest edge scaled to size["shortest_edge"], aspect preserved;
        # batched mode takes the per-image maxima.
        if not batched:
            __snake_case : Dict = image_inputs[0]
            if isinstance(lowerCamelCase , Image.Image ):
                __snake_case , __snake_case : Dict = image.size
            else:
                __snake_case , __snake_case : List[str] = image.shape[1], image.shape[2]
            if w < h:
                __snake_case : Optional[int] = int(self.size["shortest_edge"] * h / w )
                __snake_case : int = self.size["shortest_edge"]
            elif w > h:
                __snake_case : List[str] = self.size["shortest_edge"]
                __snake_case : Optional[Any] = int(self.size["shortest_edge"] * w / h )
            else:
                __snake_case : List[Any] = self.size["shortest_edge"]
                __snake_case : Any = self.size["shortest_edge"]
        else:
            __snake_case : int = []
            for image in image_inputs:
                __snake_case , __snake_case : List[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __snake_case : str = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
            __snake_case : str = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
    """Tests for ConditionalDetrImageProcessor: attribute presence, config
    round-trips, PIL/numpy/torch batching, and slow integration tests on
    COCO detection and panoptic annotations.

    NOTE(review): identifiers look machine-mangled (``__snake_case`` /
    ``lowerCamelCase``); bodies read names that are never bound under these
    names.  Code left byte-identical.
    """
    __UpperCAmelCase : str = ConditionalDetrImageProcessor if is_vision_available() else None
    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        # setUp: create the helper tester defined above.
        __snake_case : Optional[Any] = ConditionalDetrImageProcessingTester(self )
    @property
    def __snake_case ( self : Any ) -> str:
        return self.image_processor_tester.prepare_image_processor_dict()
    def __snake_case ( self : Optional[Any] ) -> Optional[int]:
        # The processor must expose all the standard configuration attributes.
        __snake_case : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "size" ) )
    def __snake_case ( self : Any ) -> Dict:
        # from_dict must honour defaults and explicit size/max_size overrides.
        __snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
        __snake_case : str = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> Dict:
        # Intentionally empty override of a base-class test.
        pass
    def __snake_case ( self : Tuple ) -> str:
        # Initialize image_processing
        __snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        __snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self : int ) -> str:
        # Initialize image_processing
        __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )
        # Test not batched input
        __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self : int ) -> List[str]:
        # Initialize image_processing
        __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )
        # Test not batched input
        __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : List[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case : int = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def __snake_case ( self : Any ) -> Optional[int]:
        # Integration test against real COCO detection annotations.
        # prepare image and target
        __snake_case : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            __snake_case : str = json.loads(f.read() )
        __snake_case : List[Any] = {"image_id": 39769, "annotations": target}
        # encode them
        __snake_case : List[str] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        __snake_case : List[str] = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        __snake_case : Tuple = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        __snake_case : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
        # verify area
        __snake_case : List[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        __snake_case : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        __snake_case : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
        # verify image_id
        __snake_case : Optional[Any] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        __snake_case : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        __snake_case : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify orig_size
        __snake_case : int = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        __snake_case : Tuple = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
    @slow
    def __snake_case ( self : str ) -> Tuple:
        # Integration test against real COCO panoptic annotations.
        # prepare image, target and masks_path
        __snake_case : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            __snake_case : str = json.loads(f.read() )
        __snake_case : str = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        __snake_case : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        __snake_case : int = ConditionalDetrImageProcessor(format="coco_panoptic" )
        __snake_case : str = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        __snake_case : List[str] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        __snake_case : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
        # verify area
        __snake_case : Any = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        __snake_case : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        __snake_case : Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
        # verify image_id
        __snake_case : Tuple = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        __snake_case : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        __snake_case : int = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify masks
        __snake_case : List[Any] = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
        # verify orig_size
        __snake_case : List[str] = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        __snake_case : Any = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 81 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return True when every element of *__lowerCamelCase* is distinct.

    Works on any sized iterable of hashable elements (list, str, tuple...):
    the collection is deduplicated into a set and the two lengths compared.
    """
    total_items = len(__lowerCamelCase)
    distinct_items = len(set(__lowerCamelCase))
    return distinct_items == total_items


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 81 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# Checkpoint identifiers with weights hosted on the Hub.
_snake_case : Optional[Any] = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
# NOTE(review): rebinding `_snake_case` clobbers the checkpoint list above;
# upstream these are two differently named constants.
_snake_case : Dict = "UperNetConfig"
class a (nn.Module ):
    """UperNet's basic convolution block: Conv2d -> BatchNorm2d -> ReLU.

    NOTE(review): the original block used ``nn.Convad`` / ``nn.BatchNormad``
    (names that do not exist in torch — mangled ``Conv2d``/``BatchNorm2d``)
    and had every parameter named ``lowerCamelCase`` (duplicate argument
    names — a SyntaxError) while the body read ``in_channels`` etc.  The
    attribute names ``conv``/``batch_norm``/``activation`` come from the
    forward body itself.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        # Bias defaults to False because BatchNorm supplies the affine shift.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply convolution, batch normalization and ReLU in sequence."""
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class a (nn.Module ):
    """One block of UperNet's pyramid pooling module: adaptive average
    pooling to a fixed scale followed by a 1x1 projection conv.

    NOTE(review): ``nn.AdaptiveAvgPoolad`` does not exist in torch
    (presumably mangled ``AdaptiveAvgPool2d``), the identically named
    parameters are a SyntaxError, and the bodies read names that are never
    bound (``self.layers``, ``input``, ``hidden_state``).  Code left
    byte-identical.
    """
    def __init__( self : str , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ) -> None:
        super().__init__()
        # Pool then project; parameters appear to be
        # (pool_scale, in_channels, channels) — confirm against upstream.
        __snake_case : Tuple = [
            nn.AdaptiveAvgPoolad(lowerCamelCase ),
            UperNetConvModule(lowerCamelCase , lowerCamelCase , kernel_size=1 ),
        ]
        # Register each layer under its positional index.
        for i, layer in enumerate(self.layers ):
            self.add_module(str(lowerCamelCase ) , lowerCamelCase )
    def __snake_case ( self : Dict , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # Feed the input through the registered layers in order.
        __snake_case : List[str] = input
        for layer in self.layers:
            __snake_case : Tuple = layer(lowerCamelCase )
        return hidden_state
class a (nn.Module ):
    """UperNet pyramid pooling module (PSP): one pooling block per scale;
    each block's output is upsampled back to the input's spatial size.

    NOTE(review): mangled identifiers as elsewhere in this file; the bodies
    read names (``pool_scales``, ``self.blocks``, ``x``, ``ppm_outs``) that
    are never bound under these names.  Code left byte-identical.
    """
    def __init__( self : Any , lowerCamelCase : Tuple[int, ...] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool ) -> None:
        super().__init__()
        __snake_case : Dict = pool_scales
        __snake_case : List[str] = align_corners
        __snake_case : List[Any] = in_channels
        __snake_case : str = channels
        __snake_case : Optional[Any] = []
        # One pooling block per pyramid scale, registered by index.
        for i, pool_scale in enumerate(lowerCamelCase ):
            __snake_case : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase , in_channels=lowerCamelCase , channels=lowerCamelCase )
            self.blocks.append(lowerCamelCase )
            self.add_module(str(lowerCamelCase ) , lowerCamelCase )
    def __snake_case ( self : int , lowerCamelCase : torch.Tensor ) -> List[torch.Tensor]:
        # Apply each block, then bilinearly upsample back to the input size.
        __snake_case : Tuple = []
        for ppm in self.blocks:
            __snake_case : Any = ppm(lowerCamelCase )
            __snake_case : List[Any] = nn.functional.interpolate(
                lowerCamelCase , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
            ppm_outs.append(lowerCamelCase )
        return ppm_outs
class a (nn.Module ):
    """UperNet decode head: PSP module on the deepest feature map plus an
    FPN top-down path over the shallower maps, fused and classified.

    NOTE(review): mangled identifiers as elsewhere in this file
    (``nn.Convad`` is presumably ``Conv2d``); bodies read names that are
    never bound under these names.  Code left byte-identical.
    """
    def __init__( self : int , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]:
        super().__init__()
        __snake_case : Dict = config
        __snake_case : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
        __snake_case : Tuple = in_channels
        __snake_case : str = config.hidden_size
        __snake_case : List[str] = False
        # Final 1x1 classifier over the fused FPN features.
        __snake_case : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        __snake_case : Tuple = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        __snake_case : List[str] = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        __snake_case : List[Any] = nn.ModuleList()
        __snake_case : Dict = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            __snake_case : Union[str, Any] = UperNetConvModule(lowerCamelCase , self.channels , kernel_size=1 )
            __snake_case : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowerCamelCase )
            self.fpn_convs.append(lowerCamelCase )
        __snake_case : int = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def __snake_case ( self : List[str] ) -> Optional[Any]:
        # Initialize all sub-module weights via _init_weights.
        self.apply(self._init_weights )
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> str:
        # Normal init for conv weights, zero for biases.
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def __snake_case ( self : List[Any] , lowerCamelCase : Tuple ) -> Optional[int]:
        # PSP branch: pool the deepest feature map at several scales,
        # concatenate with the original, and bottleneck the result.
        __snake_case : str = inputs[-1]
        __snake_case : int = [x]
        psp_outs.extend(self.psp_modules(lowerCamelCase ) )
        __snake_case : Tuple = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Union[str, Any] = self.bottleneck(lowerCamelCase )
        return output
    def __snake_case ( self : int , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # build laterals
        __snake_case : Any = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowerCamelCase ) )
        # build top-down path
        __snake_case : Dict = len(lowerCamelCase )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            __snake_case : Union[str, Any] = laterals[i - 1].shape[2:]
            __snake_case : Optional[int] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowerCamelCase , mode="bilinear" , align_corners=self.align_corners )
        # build outputs
        __snake_case : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            __snake_case : Tuple = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
        __snake_case : str = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Optional[Any] = self.fpn_bottleneck(lowerCamelCase )
        __snake_case : Tuple = self.classifier(lowerCamelCase )
        return output
class a (nn.Module ):
    """UperNet auxiliary FCN head: a small stack of conv blocks over one
    selected backbone feature map, used for deep supervision.

    NOTE(review): mangled identifiers as elsewhere in this file
    (``nn.Convad`` is presumably ``Conv2d``); bodies read names that are
    never bound under these names.  Code left byte-identical.
    """
    def __init__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int = 2 , lowerCamelCase : int = 3 , lowerCamelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        __snake_case : List[Any] = config
        __snake_case : List[str] = config.auxiliary_in_channels
        __snake_case : List[Any] = config.auxiliary_channels
        __snake_case : Tuple = config.auxiliary_num_convs
        __snake_case : int = config.auxiliary_concat_input
        __snake_case : Optional[int] = in_index
        # "Same" padding for the dilated 3x3 convolutions below.
        __snake_case : Tuple = (kernel_size // 2) * dilation
        __snake_case : Optional[int] = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        if self.num_convs == 0:
            __snake_case : Union[str, Any] = nn.Identity()
        else:
            __snake_case : Any = nn.Sequential(*lowerCamelCase )
        if self.concat_input:
            # Optional fusion of the raw input with the conv stack's output.
            __snake_case : int = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase , padding=kernel_size // 2 )
        __snake_case : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    def __snake_case ( self : Dict ) -> Optional[Any]:
        # Initialize all sub-module weights via _init_weights.
        self.apply(self._init_weights )
    def __snake_case ( self : Tuple , lowerCamelCase : Tuple ) -> Optional[int]:
        # Normal init for conv weights, zero for biases.
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def __snake_case ( self : Optional[int] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        __snake_case : List[str] = encoder_hidden_states[self.in_index]
        __snake_case : Optional[Any] = self.convs(lowerCamelCase )
        if self.concat_input:
            __snake_case : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        __snake_case : Union[str, Any] = self.classifier(lowerCamelCase )
        return output
class a (_lowerCAmelCase ):
    """Abstract PreTrainedModel-style base for UperNet: wires the config
    class, the input name, and weight initialization of backbone and heads.

    NOTE(review): mangled identifiers; the first method appears to be a
    per-module init hook, the second a full re-init, the third a gradient
    checkpointing toggle (it assigns an unbound ``value``).  Code left
    byte-identical.
    """
    # Class-level wiring: config class, main input name, and (presumably)
    # supports_gradient_checkpointing — the three assignments to the same
    # mangled name clobber each other.
    __UpperCAmelCase : Optional[Any] = UperNetConfig
    __UpperCAmelCase : int = "pixel_values"
    __UpperCAmelCase : str = True
    def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ) -> List[Any]:
        # Per-module init hook: re-initialize the composite model's parts.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def __snake_case ( self : Optional[Any] ) -> List[str]:
        # Re-initialize backbone and both heads on this instance.
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=False ) -> Dict:
        # Toggle (presumably) gradient checkpointing on matching modules.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __snake_case : Union[str, Any] = value
_snake_case : Dict = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case : Tuple = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , _lowerCAmelCase , )
class a (_lowerCAmelCase ):
    """UPerNet semantic-segmentation model: a vision backbone feeding a UPerNet
    decode head and an optional FCN auxiliary head.

    Fix: the original block repeated the parameter name ``lowerCamelCase`` in
    the forward signature (a SyntaxError) and read undefined locals
    (``return_dict``, ``pixel_values``, ``labels``, …); consistent names are
    restored from the visible reads.
    """

    def __init__( self : Dict , config : int ) -> Optional[int]:
        super().__init__(config )
        # Any backbone exposing `forward_with_filtered_kwargs` / `channels`.
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def __snake_case ( self : Union[str, Any] , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        """Run semantic segmentation on `pixel_values`.

        When `labels` is given, returns a weighted sum of the main and
        auxiliary cross-entropy losses; logits are upsampled to the input
        resolution before the loss / return.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        # Upsample the head output back to the input spatial resolution.
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one" )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 81 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class a (_lowerCAmelCase ):
    """Task template for extractive question answering over (question, context)
    pairs, describing the expected input/label schema of a dataset.

    NOTE(review): all class attributes below share the name ``__UpperCAmelCase``
    (later assignments overwrite earlier ones) and the property reads
    ``question_column`` / ``context_column`` / ``answers_column`` which are
    never bound — this looks like a mechanical rename of the original fields
    (``task``, ``input_schema``, ``label_schema``, ``question_column``,
    ``context_column``, ``answers_column``); confirm before relying on it.
    """

    # task name; metadata forces serialization even at the default value
    __UpperCAmelCase : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    # expected input features: free-text question and context
    __UpperCAmelCase : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    # expected label features: answer texts plus their character start offsets
    __UpperCAmelCase : ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    __UpperCAmelCase : str = "question"
    __UpperCAmelCase : str = "context"
    __UpperCAmelCase : str = "answers"

    @property
    def __snake_case ( self : Optional[Any] ) -> Dict[str, str]:
        """Map the configured dataset column names onto the canonical QA names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 81 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return the maximum sum of non-adjacent elements of ``__lowerCamelCase``.

    Classic "house robber" dynamic programming: track the best sum that
    includes the current element and the best sum that excludes it.
    Returns 0 for an empty list.

    Fix: the original body read undefined names (``nums``, ``max_excluding``)
    and returned ``max(lst, lst)`` — consistent local names are restored.
    """
    if not __lowerCamelCase:
        return 0
    # Best sum including the first element / best sum excluding it so far.
    max_including = __lowerCamelCase[0]
    max_excluding = 0
    for num in __lowerCamelCase[1:]:
        # Taking `num` forces the previous element to have been skipped.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run any doctest examples in this module as a smoke test when executed directly.
    import doctest

    doctest.testmod()
| 81 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_snake_case : str = logging.get_logger(__name__)

# Canonical pretrained checkpoints and where to fetch their config from.
_snake_case : Optional[int] = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class a (_lowerCAmelCase ):
    """Configuration for the CLIP-style vision encoder of a GIT model.

    Fix: the original ``__init__`` repeated the parameter name
    ``lowerCamelCase`` (a SyntaxError) while the body read the original
    hyper-parameter names; the names are restored from those reads and the
    visible default values.
    """

    # Restored from the mechanically renamed class attribute: the
    # `model_type` check in the classmethod below relies on this name.
    model_type = "git_vision_model"

    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs ) -> None:
        """Store the vision-tower hyper-parameters; defaults match microsoft/git-base."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def __snake_case ( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load the vision config from a checkpoint name or path.

        When the checkpoint holds a full GIT config, the nested
        `vision_config` sub-dictionary is extracted first.
        """
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class a (_lowerCAmelCase ):
    """Configuration for a GIT (GenerativeImage2Text) model: a text decoder
    conditioned on the vision tower configured by `vision_config`.

    Fix: the original ``__init__`` repeated the parameter name
    ``lowerCamelCase`` (a SyntaxError) while the body read the original
    hyper-parameter names; the names are restored from those reads and the
    visible default values.
    """

    # Restored from the mechanically renamed class attribute; also used by
    # `GitVisionConfig.from_pretrained` to detect a full GIT config dict.
    model_type = "git"

    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs ) -> None:
        """Store decoder hyper-parameters and build the nested vision config."""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def __snake_case ( self : List[Any] ) -> Tuple:
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 81 |
from __future__ import annotations
from typing import Any
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Print every subsequence of the given sequence via backtracking.

    NOTE(review): this delegates to ``create_state_space_tree``, which is not
    defined under that name in this module — the helper below was apparently
    renamed to ``lowerCAmelCase_`` too (shadowing this function); confirm and
    restore the original function names.
    """
    create_state_space_tree(__lowerCamelCase , [] , 0 )
def lowerCAmelCase_ ( sequence , current_subsequence , index ):
    """Recursively print all 2**len(sequence) subsequences of ``sequence``.

    ``current_subsequence`` is a shared scratch list that is restored before
    returning (backtracking); ``index`` is the position currently decided.

    Fix: the original signature repeated ``__lowerCamelCase`` three times
    (a SyntaxError) and the recursion targeted an undefined name — distinct
    parameter names and self-recursion are restored.
    """
    if index == len(sequence ):
        print(current_subsequence )
        return
    # Branch 1: exclude sequence[index].
    lowerCAmelCase_(sequence , current_subsequence , index + 1 )
    # Branch 2: include sequence[index], then undo the choice.
    current_subsequence.append(sequence[index] )
    lowerCAmelCase_(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    # Demo: print all subsequences of a numeric and then a string sequence.
    # NOTE(review): `generate_all_subsequences` is not defined in this module
    # (both functions above are named `lowerCAmelCase_`), and the list is
    # assigned to `_snake_case` but read back as `seq` — this block looks
    # mechanically renamed; confirm the intended entry point and variable name.
    _snake_case : list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 81 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a (unittest.TestCase ):
    """Fast (CPU-sized) unit tests for `AltDiffusionImgaImgPipeline`, built
    from tiny randomly initialised components.

    NOTE(review): throughout this class, local-variable *writes* were
    mechanically renamed to ``__snake_case`` while *reads* keep the original
    names (``batch_size``, ``unet``, ``alt_pipe``, …), and method names were
    collapsed to ``__snake_case`` (the first method is clearly a ``tearDown``
    override). Confirm against the original test file before executing.
    """

    def __snake_case ( self : Optional[int] ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def __snake_case ( self : Union[str, Any] ) -> Tuple:
        # Dummy 1x3x32x32 input image tensor (seeded for determinism).
        __snake_case : List[Any] = 1
        __snake_case : Any = 3
        __snake_case : str = (32, 32)
        __snake_case : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase )
        return image

    @property
    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        # Tiny conditional UNet.
        torch.manual_seed(0 )
        __snake_case : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def __snake_case ( self : List[str] ) -> Optional[Any]:
        # Tiny VAE.
        torch.manual_seed(0 )
        __snake_case : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def __snake_case ( self : int ) -> Any:
        # Tiny RoBERTa-series text encoder.
        torch.manual_seed(0 )
        __snake_case : Optional[Any] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(lowerCamelCase )

    @property
    def __snake_case ( self : int ) -> Any:
        # Stand-in feature extractor returning an empty pixel_values tensor.
        def extract(*lowerCamelCase : int , **lowerCamelCase : Any ):
            class a :
                """Minimal object mimicking a feature-extractor output."""

                def __init__( self : Tuple ) -> int:
                    __snake_case : List[Any] = torch.ones([0] )

                def __snake_case ( self : List[Any] , lowerCamelCase : List[str] ) -> Dict:
                    self.pixel_values.to(lowerCamelCase )
                    return self

            return Out()

        return extract

    def __snake_case ( self : Tuple ) -> Tuple:
        # End-to-end img2img run; compares output slice and tuple-return path.
        __snake_case : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        __snake_case : Optional[Any] = self.dummy_cond_unet
        __snake_case : Dict = PNDMScheduler(skip_prk_steps=lowerCamelCase )
        __snake_case : List[Any] = self.dummy_vae
        __snake_case : Optional[int] = self.dummy_text_encoder
        __snake_case : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        __snake_case : List[Any] = 77
        __snake_case : int = self.dummy_image.to(lowerCamelCase )
        __snake_case : str = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        __snake_case : Dict = AltDiffusionImgaImgPipeline(
            unet=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=self.dummy_extractor , )
        __snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase )
        __snake_case : Any = alt_pipe.to(lowerCamelCase )
        alt_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __snake_case : str = "A painting of a squirrel eating a burger"
        __snake_case : Optional[int] = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
        __snake_case : Any = alt_pipe(
            [prompt] , generator=lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , )
        __snake_case : Dict = output.images
        __snake_case : Tuple = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
        __snake_case : Dict = alt_pipe(
            [prompt] , generator=lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , return_dict=lowerCamelCase , )[0]
        __snake_case : int = image[0, -3:, -3:, -1]
        __snake_case : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __snake_case : Tuple = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def __snake_case ( self : int ) -> Optional[int]:
        # Same pipeline in fp16; only checks the output shape.
        __snake_case : Optional[int] = self.dummy_cond_unet
        __snake_case : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase )
        __snake_case : Optional[int] = self.dummy_vae
        __snake_case : Optional[int] = self.dummy_text_encoder
        __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        __snake_case : List[str] = 77
        __snake_case : Optional[int] = self.dummy_image.to(lowerCamelCase )
        # put models in fp16
        __snake_case : str = unet.half()
        __snake_case : Dict = vae.half()
        __snake_case : int = bert.half()
        # make sure here that pndm scheduler skips prk
        __snake_case : int = AltDiffusionImgaImgPipeline(
            unet=lowerCamelCase , scheduler=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=self.dummy_extractor , )
        __snake_case : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCamelCase )
        __snake_case : Dict = alt_pipe.to(lowerCamelCase )
        alt_pipe.set_progress_bar_config(disable=lowerCamelCase )
        __snake_case : int = "A painting of a squirrel eating a burger"
        __snake_case : int = torch.manual_seed(0 )
        __snake_case : int = alt_pipe(
            [prompt] , generator=lowerCamelCase , num_inference_steps=2 , output_type="np" , image=lowerCamelCase , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def __snake_case ( self : Any ) -> List[str]:
        # Integration-style run with a real checkpoint at a non-multiple-of-32 size.
        __snake_case : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        __snake_case : Any = init_image.resize((760, 504) )
        __snake_case : Optional[Any] = "BAAI/AltDiffusion"
        __snake_case : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCamelCase , safety_checker=lowerCamelCase , )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        __snake_case : Dict = "A fantasy landscape, trending on artstation"
        __snake_case : int = torch.manual_seed(0 )
        __snake_case : Dict = pipe(
            prompt=lowerCamelCase , image=lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase , output_type="np" , )
        __snake_case : Optional[Any] = output.images[0]
        __snake_case : Dict = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        __snake_case : Any = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a (unittest.TestCase ):
    """Slow GPU integration test comparing `AltDiffusionImgaImgPipeline`
    output against a stored reference image.

    NOTE(review): as elsewhere in this file, local writes were renamed to
    ``__snake_case`` while reads keep original names (``init_image``,
    ``pipe``, ``image``, …); confirm against the original test file.
    """

    def __snake_case ( self : List[Any] ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Optional[Any] ) -> int:
        # Full img2img run on a real checkpoint; MAE comparison at the end.
        __snake_case : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        __snake_case : str = init_image.resize((768, 512) )
        __snake_case : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        __snake_case : Any = "BAAI/AltDiffusion"
        __snake_case : Any = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCamelCase , safety_checker=lowerCamelCase , )
        pipe.to(lowerCamelCase )
        pipe.set_progress_bar_config(disable=lowerCamelCase )
        pipe.enable_attention_slicing()
        __snake_case : Optional[Any] = "A fantasy landscape, trending on artstation"
        __snake_case : Optional[Any] = torch.manual_seed(0 )
        __snake_case : Optional[Any] = pipe(
            prompt=lowerCamelCase , image=lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=lowerCamelCase , output_type="np" , )
        __snake_case : Union[str, Any] = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 81 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Convert a PIL image into a normalised NCHW torch tensor in [-1, 1].

    The image is first shrunk so width and height are multiples of 32
    (required by the UNet), converted to float32 in [0, 1], moved to NCHW
    layout, and finally rescaled to [-1, 1].

    Fix: the original body read undefined names (``w``, ``h``, ``image``) and
    used the nonexistent ``np.floataa`` — consistent locals and ``np.float32``
    are restored.
    """
    w, h = __lowerCamelCase.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = __lowerCamelCase.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class a (_lowerCAmelCase ):
    """Latent-diffusion super-resolution pipeline: a `VQModel` decoder, a UNet
    denoiser and a diffusion scheduler.

    The low-resolution image is concatenated channel-wise with the latents at
    every denoising step, and the final latents are decoded with the VQ-VAE.

    Fix: the original signatures repeated the parameter name ``lowerCamelCase``
    (a SyntaxError) and the body read undefined locals (``height``,
    ``batch_size``, ``latents``, …); consistent names are restored from the
    visible reads.
    """

    def __init__( self : Tuple , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : List[str] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        """Run the denoising loop and return the super-resolved image(s).

        Raises ValueError when `image` is neither a PIL image nor a tensor.
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 81 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file.
_snake_case : Union[str, Any] = logging.get_logger(__name__)

# Expected vocabulary file name inside a checkpoint directory.
_snake_case : Tuple = {"vocab_file": "spm_char.model"}

# Download locations of the SentencePiece vocabularies for released checkpoints.
_snake_case : str = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

# Maximum input lengths (in tokens) for the released checkpoints.
_snake_case : Tuple = {
    "microsoft/speecht5_asr": 1_024,
    "microsoft/speecht5_tts": 1_024,
    "microsoft/speecht5_vc": 1_024,
}
class a (_lowerCAmelCase ):
    """Character-level SentencePiece tokenizer for SpeechT5.

    Wraps an `spm.SentencePieceProcessor` and appends EOS to every sequence.

    NOTE(review): several signatures repeat the parameter name
    ``lowerCamelCase`` (a SyntaxError) and local writes were renamed to
    ``__snake_case`` while reads keep original names (``sp_model_kwargs``,
    ``vocab``, ``save_directory``, …); confirm against the original
    `SpeechT5Tokenizer` before executing.
    """

    __UpperCAmelCase : str = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Dict = ["input_ids", "attention_mask"]

    def __init__( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : str="<s>" , lowerCamelCase : Union[str, Any]="</s>" , lowerCamelCase : Any="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : Any , ) -> None:
        # Load the SentencePiece model from the given vocab file.
        __snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
        __snake_case : Dict = vocab_file
        __snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCamelCase )

    @property
    def __snake_case ( self : int ) -> Tuple:
        # Vocabulary size as reported by the SentencePiece model.
        return self.sp_model.get_piece_size()

    def __snake_case ( self : List[str] ) -> Any:
        # Full token -> id mapping, including added tokens.
        __snake_case : List[str] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : int ) -> Union[str, Any]:
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        __snake_case : Optional[int] = self.__dict__.copy()
        __snake_case : Optional[Any] = None
        return state

    def __setstate__( self : int , lowerCamelCase : str ) -> Tuple:
        __snake_case : Tuple = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __snake_case : Tuple = {}
        __snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __snake_case ( self : List[str] , lowerCamelCase : str ) -> List[str]:
        # Tokenize raw text into SentencePiece pieces.
        return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )

    def __snake_case ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        # Token string -> vocabulary id.
        return self.sp_model.piece_to_id(lowerCamelCase )

    def __snake_case ( self : Tuple , lowerCamelCase : Dict ) -> Tuple:
        # Vocabulary id -> token string.
        __snake_case : List[Any] = self.sp_model.IdToPiece(lowerCamelCase )
        return token

    def __snake_case ( self : int , lowerCamelCase : Any ) -> Tuple:
        # Detokenize, decoding special tokens verbatim rather than via SentencePiece.
        __snake_case : Optional[Any] = []
        __snake_case : Tuple = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCamelCase ) + token
                __snake_case : Tuple = []
            else:
                current_sub_tokens.append(lowerCamelCase )
        out_string += self.sp_model.decode(lowerCamelCase )
        return out_string.strip()

    def __snake_case ( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]=None ) -> List[int]:
        # Append EOS; pair input is concatenated for API consistency only.
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def __snake_case ( self : Optional[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
        # Mask marking special tokens (the trailing EOS) with 1.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
        __snake_case : str = [1]
        if token_ids_a is None:
            return ([0] * len(lowerCamelCase )) + suffix_ones
        return ([0] * len(lowerCamelCase )) + ([0] * len(lowerCamelCase )) + suffix_ones

    def __snake_case ( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
        # Copy (or serialize) the SentencePiece model into `save_directory`.
        if not os.path.isdir(lowerCamelCase ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __snake_case : Optional[int] = os.path.join(
            lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCamelCase , "wb" ) as fi:
                __snake_case : int = self.sp_model.serialized_model_proto()
                fi.write(lowerCamelCase )
        return (out_vocab_file,)
| 81 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = AutoencoderKL
__UpperCAmelCase : Optional[Any] = "sample"
__UpperCAmelCase : Optional[int] = 1e-2
@property
def __snake_case ( self : Dict ) -> Optional[Any]:
__snake_case : Optional[Any] = 4
__snake_case : Tuple = 3
__snake_case : List[str] = (32, 32)
__snake_case : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ) -> Tuple:
return (3, 32, 32)
@property
def __snake_case ( self : int ) -> int:
return (3, 32, 32)
def __snake_case ( self : Optional[Any] ) -> Dict:
__snake_case : Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__snake_case : Any = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : str ) -> Dict:
pass
def __snake_case ( self : Tuple ) -> List[str]:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def __snake_case ( self : Any ) -> Optional[Any]:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
__snake_case : str = self.model_class(**lowerCamelCase )
model.to(lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
__snake_case : str = model(**lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case : Any = torch.randn_like(lowerCamelCase )
__snake_case : str = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case : Optional[int] = self.model_class(**lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case : int = model_a(**lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case : Union[str, Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__snake_case : Optional[int] = dict(model.named_parameters() )
__snake_case : List[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def __snake_case ( self : List[Any] ) -> Optional[int]:
__snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase )
__snake_case : Optional[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
    # Regression test: a seeded forward pass through the dummy VAE must reproduce
    # device-specific expected output slices (mps / cpu / cuda differ because the
    # Gaussian prior's generator is seeded on the respective device).
    # NOTE(review): `lowerCamelCase` is unbound throughout this method — presumably
    # torch_device / True in the various call sites; confirm before relying on it.
    __snake_case : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
    __snake_case : Dict = model.to(lowerCamelCase )
    model.eval()
    # mps has no device-specific torch.Generator; fall back to the global seed.
    if torch_device == "mps":
        __snake_case : int = torch.manual_seed(0 )
    else:
        __snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
    # Deterministic noise input shaped from the model config.
    __snake_case : List[str] = torch.randn(
        1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
    __snake_case : Union[str, Any] = image.to(lowerCamelCase )
    with torch.no_grad():
        __snake_case : str = model(lowerCamelCase , sample_posterior=lowerCamelCase , generator=lowerCamelCase ).sample
    # Compare only a small corner slice of the output against golden values.
    __snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
    # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
    # the expected output slices are not the same for CPU and GPU.
    if torch_device == "mps":
        __snake_case : Union[str, Any] = torch.tensor(
            [
                -4.0078E-01,
                -3.8323E-04,
                -1.2681E-01,
                -1.1462E-01,
                2.0095E-01,
                1.0893E-01,
                -8.8247E-02,
                -3.0361E-01,
                -9.8644E-03,
            ] )
    elif torch_device == "cpu":
        __snake_case : Tuple = torch.tensor(
            [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
    else:
        __snake_case : List[str] = torch.tensor(
            [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
    # Loose relative tolerance: cross-device float kernels differ slightly.
    self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
@slow
class a (unittest.TestCase ):
    """Slow integration tests for AutoencoderKL against Stable Diffusion checkpoints.

    Covers seeded sampling, fp16 inference, decode/encode paths and the xformers
    memory-efficient attention backend, comparing output slices to golden values.

    NOTE(review): several ``def`` signatures below reuse the name ``lowerCamelCase``
    for more than one parameter, which is a SyntaxError in Python — the automated
    renaming that produced this file collapsed distinct parameter names. The
    comments describe the apparent intent; they cannot be confirmed from here.
    """

    def __snake_case ( self : int , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] ) -> List[str]:
        # Cache filename for a stored gaussian-noise fixture; the f-string reads
        # `seed` and `shape`, presumably the two (collapsed) parameters.
        return F'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase ) for s in shape] )}.npy'

    def __snake_case ( self : List[Any] ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Tuple , lowerCamelCase : List[Any]=0 , lowerCamelCase : Tuple=(4, 3, 512, 512) , lowerCamelCase : Optional[int]=False ) -> str:
        # Load a cached noise tensor fixture from the hub and move it to the test device.
        # NOTE(review): both ternary branches read `torch.floataa` (nonexistent dtype);
        # presumably float16 vs float32 before the renaming pass — verify.
        __snake_case : List[Any] = torch.floataa if fpaa else torch.floataa
        __snake_case : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase , lowerCamelCase ) ) ).to(lowerCamelCase ).to(lowerCamelCase )
        return image

    def __snake_case ( self : Optional[Any] , lowerCamelCase : int="CompVis/stable-diffusion-v1-4" , lowerCamelCase : int=False ) -> int:
        # Load the SD VAE subfolder, optionally in fp16 (revision "fp16"), in eval mode.
        __snake_case : str = "fp16" if fpaa else None
        __snake_case : int = torch.floataa if fpaa else torch.floataa
        __snake_case : int = AutoencoderKL.from_pretrained(
            lowerCamelCase , subfolder="vae" , torch_dtype=lowerCamelCase , revision=lowerCamelCase , )
        model.to(lowerCamelCase ).eval()
        return model

    def __snake_case ( self : str , lowerCamelCase : int=0 ) -> Optional[Any]:
        # Device-appropriate seeded generator (mps has no device generator).
        if torch_device == "mps":
            return torch.manual_seed(lowerCamelCase )
        return torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )

    # Seeded stochastic forward pass: output slice must match golden values
    # (separate expectations for mps vs other devices).
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[Any]:
        __snake_case : Optional[Any] = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
        __snake_case : Tuple = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : Optional[Any] = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : int = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )

    # Same stochastic pass in fp16 on GPU; looser tolerance for half precision.
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
            [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] ) -> Tuple:
        __snake_case : Any = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase , fpaa=lowerCamelCase )
        __snake_case : List[Any] = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : Any = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )

    # Deterministic (mode) forward pass without sampling.
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ) -> int:
        __snake_case : int = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
        with torch.no_grad():
            __snake_case : int = model(lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : List[str] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )

    # Decoder path: latent (3, 4, 64, 64) must decode to images (3, 3, 512, 512).
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
            [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any ) -> Optional[Any]:
        __snake_case : List[str] = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : str = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        __snake_case : str = sample[-1, -2:, :2, -2:].flatten().cpu()
        __snake_case : Optional[int] = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-3 )

    # Decoder path in fp16 on GPU.
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
            [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> int:
        __snake_case : int = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : List[str] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
        with torch.no_grad():
            __snake_case : Union[str, Any] = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        __snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : Optional[Any] = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=5E-3 )

    # Decode twice (default attention vs xformers) and require close agreement, fp16.
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> Tuple:
        __snake_case : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : Any = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model.decode(lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : Any = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-1 )

    # Same xformers-vs-default comparison in full precision (tighter tolerance).
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
        __snake_case : str = self.get_sd_vae_model()
        __snake_case : Union[str, Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : List[Any] = model.decode(lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : Dict = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )

    # Encoder path: posterior sample must have latent shape (B, 4, H/8, W/8)
    # and match golden slices (looser tolerance on mps).
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
            [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
            # fmt: on
        ] )
    def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Optional[int]:
        __snake_case : str = self.get_sd_vae_model()
        __snake_case : int = self.get_sd_image(lowerCamelCase )
        __snake_case : int = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : Optional[Any] = model.encode(lowerCamelCase ).latent_dist
            __snake_case : Dict = dist.sample(generator=lowerCamelCase )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        __snake_case : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
        __snake_case : Dict = torch.tensor(lowerCamelCase )
        __snake_case : Dict = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=lowerCamelCase )
| 81 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formatter that recursively converts data extracted from a pyarrow Table
    into ``torch.Tensor`` objects.

    Keyword arguments given at construction are forwarded to every
    ``torch.tensor(...)`` call (e.g. ``device=...``, ``dtype=...``) and take
    precedence over the dtype inferred from the numpy value.

    Bug fixes vs the previous revision:
    - ``__init__`` reused one name for both the positional and the ``**`` parameter
      (a SyntaxError); restored ``(features=None, **torch_tensor_kwargs)``.
    - ``torch.intaa`` / ``torch.floataa`` do not exist; restored the standard
      ``torch.int64`` / ``torch.float32`` defaults.
    - the string/None pass-through tested ``type(value)`` (always true) instead of
      ``type(None)``.
    - object-dtype arrays recursed on the whole structure instead of each element,
      causing infinite recursion; restored per-``substruct`` recursion.
    - the method names below are the ones the bodies themselves call
      (``self._consolidate`` etc.); the previous revision defined every method as
      ``__snake_case``, so later defs shadowed earlier ones.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - fail fast at construction if torch is missing

    def _consolidate(self, column):
        """Stack a non-empty list of same-shape/same-dtype tensors; otherwise return as-is."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a tensor; strings, bytes and None pass through."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            # character arrays become plain Python strings, not tensors
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # user-supplied kwargs override the inferred default dtype
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        """Tensorize an arbitrarily nested structure (dicts handled by map_nested)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        """Extract, decode and tensorize a single row as a Mapping."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        """Extract, decode and tensorize the first column as a tensor (stacked when possible)."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        """Extract, decode and tensorize a batch, consolidating each column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 81 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_snake_case : Optional[int] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias of ``FlavaImageProcessor``, kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly like
    its image-processor base class.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Bug fix: the previous revision reused one parameter name for both *args
        # and **kwargs (a SyntaxError) and passed the args tuple as the warning
        # category; deprecation notices use FutureWarning.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 81 | 1 |
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Sort a list of comparable items in ascending order using pancake sort.

    Repeatedly flips the largest remaining element to the front, then flips it
    into its final position. Returns the sorted list (the result of the last
    rebuild; with an already-sorted or empty input the original list is returned).

    Bug fix: the previous revision assigned every intermediate to the same
    throwaway name, so ``cur``/``arr``/``mi`` were read but never bound (NameError).
    """
    arr = __lowerCamelCase
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum within the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the prefix so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
_snake_case : Tuple = input("Enter numbers separated by a comma:\n").strip()
_snake_case : List[Any] = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 81 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """Tests SamProcessor save/load round-trips and mask post-processing (PyTorch).

    NOTE(review): many local results below are assigned to the throwaway name
    ``__snake_case`` while later lines read ``processor``, ``masks``,
    ``input_feat_extract`` etc. — artifacts of an automated renaming pass; the
    comments describe the apparent intent.
    """

    def __snake_case ( self : Tuple ) -> Optional[Any]:
        # setUp: persist a default SamProcessor into a fresh temp dir.
        __snake_case : Dict = tempfile.mkdtemp()
        __snake_case : Any = SamImageProcessor()
        __snake_case : Optional[int] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
        # Reload the image processor from the temp dir with override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[Any] ) -> Dict:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : int ) -> List[Any]:
        # Build a single random RGB PIL image as test input.
        __snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : List[Any] ) -> Dict:
        # Save/load round-trip must honor kwargs passed to from_pretrained.
        __snake_case : int = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : List[str] ) -> Tuple:
        # Processor output must match the bare image processor's output.
        __snake_case : int = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Optional[int] = self.prepare_image_inputs()
        __snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : Dict = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_torch
    def __snake_case ( self : Optional[Any] ) -> Dict:
        # post_process_masks must upscale masks to the original sizes, accept
        # tensors / numpy alike, and raise on malformed size inputs.
        __snake_case : Tuple = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[str] = [torch.ones((1, 3, 5, 5) )]
        __snake_case : Tuple = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : int = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : List[str] = [np.ones((1, 3, 5, 5) )]
        __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : str = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase ):
            __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
@require_vision
@require_tf
class a (unittest.TestCase ):
    """Tests SamProcessor save/load round-trips and mask post-processing (TensorFlow).

    NOTE(review): local results are assigned to the throwaway name ``__snake_case``
    while later lines read ``processor``, ``masks`` etc. — renaming artifacts.
    """

    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        # setUp: persist a default SamProcessor into a fresh temp dir.
        __snake_case : int = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : str , **lowerCamelCase : Any ) -> Tuple:
        # Reload the image processor from the temp dir with override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> Any:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : str ) -> List[Any]:
        # Build a single random RGB PIL image as test input.
        __snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : int ) -> List[str]:
        # Save/load round-trip must honor kwargs passed to from_pretrained.
        __snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : Union[str, Any] ) -> List[Any]:
        # Processor output must match the bare image processor's output.
        __snake_case : str = self.get_image_processor()
        __snake_case : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : int = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_tf
    def __snake_case ( self : Any ) -> Optional[int]:
        # TF variant of mask post-processing; malformed sizes raise
        # tf.errors.InvalidArgumentError rather than a Python exception.
        __snake_case : List[str] = self.get_image_processor()
        __snake_case : Dict = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        __snake_case : List[Any] = [[1764, 2646]]
        __snake_case : Dict = [[683, 1024]]
        __snake_case : List[str] = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , tf.convert_to_tensor(lowerCamelCase ) , tf.convert_to_tensor(lowerCamelCase ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
        __snake_case : List[str] = processor.post_process_masks(
            lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __snake_case : Dict = processor.post_process_masks(
                lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """Cross-framework (PyTorch vs TensorFlow) equivalence tests for SamProcessor.

    NOTE(review): local results are assigned to the throwaway name ``__snake_case``
    while later lines read ``processor``, ``tf_masks``, ``pt_masks`` etc. —
    renaming artifacts; comments describe the apparent intent.
    """

    def __snake_case ( self : List[str] ) -> str:
        # setUp: persist a default SamProcessor into a fresh temp dir.
        __snake_case : Optional[int] = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : List[str] , **lowerCamelCase : Any ) -> Dict:
        # Reload the image processor from the temp dir with override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> List[Any]:
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        # Build a single random RGB PIL image as test input.
        __snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        # post_process_masks must produce identical results for TF and PT inputs.
        __snake_case : str = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __snake_case : Dict = [tf.convert_to_tensor(lowerCamelCase )]
        __snake_case : List[Any] = [torch.tensor(lowerCamelCase )]
        __snake_case : Optional[Any] = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : Union[str, Any] = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        __snake_case : Dict = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def __snake_case ( self : List[Any] ) -> List[str]:
        # Preprocessing must produce numerically identical pixel_values whether
        # called via the processor or the bare image processor, in both frameworks.
        __snake_case : Any = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : Any = image_processor(lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Optional[Any] = processor(images=lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        __snake_case : List[Any] = processor(images=lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Timesformer model package.
# Bug fix: the previous revision assigned the structure dict and the modeling
# list to throwaway names, so the `_import_structure` read at the bottom was an
# undefined name, and the _LazyModule was bound to a variable instead of being
# installed into sys.modules (the standard transformers __init__ pattern).
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias of ``DeiTImageProcessor``, kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly like
    its image-processor base class.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Bug fix: the previous revision reused one parameter name for both *args
        # and **kwargs (a SyntaxError) and passed the args tuple as the warning
        # category; deprecation notices use FutureWarning.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 81 | 1 |
from math import sqrt
def lowerCAmelCase_ ( __lowerCamelCase = 1_0_0_0_0_0_0 ):
    """Return the least cuboid size M for which the number of axis-aligned
    cuboids (max side <= M) whose shortest surface path is an integer first
    exceeds ``__lowerCamelCase`` (Project Euler problem 86).

    For each candidate M, cuboids are counted by iterating over the sum of the
    two shortest sides and checking whether sqrt(sum**2 + M**2) is integral.

    Bug fix: the previous revision assigned the counters to a throwaway name,
    so ``num_cuboids``/``max_cuboid_size`` were read but never bound (NameError).
    """
    limit = __lowerCamelCase
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the (a, b) splits of sum_shortest_sides with a <= b <= M.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT model package.
# Bug fix: the previous revision assigned the structure dict and the optional
# entries to throwaway names, so the `_import_structure` read at the bottom was
# an undefined name, and the _LazyModule was bound to a variable instead of
# being installed into sys.modules (the standard transformers __init__ pattern).
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # vision deps missing: skip the image-processing entries
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip the modeling entries
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class a :
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any=13 , lowerCamelCase : Tuple=7 , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=True , lowerCamelCase : List[Any]=99 , lowerCamelCase : int=32 , lowerCamelCase : Tuple=5 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Optional[Any]=37 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : int=0.1 , lowerCamelCase : Any=0.1 , lowerCamelCase : List[str]=50 , lowerCamelCase : Any=0.02 , lowerCamelCase : Dict=True , lowerCamelCase : Any=None , ) -> Any:
__snake_case : List[Any] = parent
__snake_case : Dict = batch_size
__snake_case : Optional[Any] = seq_length
__snake_case : Tuple = is_training
__snake_case : Dict = use_input_mask
__snake_case : List[Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : List[str] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : str = initializer_range
__snake_case : Optional[Any] = use_labels
__snake_case : str = scope
def __snake_case ( self : Optional[Any] ) -> List[str]:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def __snake_case ( self : Tuple ) -> List[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def __snake_case ( self : str ) -> Dict:
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Any = self.prepare_config_and_inputs()
__snake_case : Tuple = True
__snake_case : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def __snake_case ( self : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , **lowerCamelCase : List[str] , ) -> str:
        """Forward-pass shape check: last_hidden_state is (batch, seq, hidden)."""
        __snake_case : str = BertGenerationEncoder(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        # run both with and without the attention mask
        __snake_case : Dict = model(lowerCamelCase , attention_mask=lowerCamelCase )
        __snake_case : Union[str, Any] = model(lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , **lowerCamelCase : Union[str, Any] , ) -> Dict:
        """Shape check for the model used as a decoder with cross-attention inputs."""
        __snake_case : List[str] = True
        __snake_case : List[Any] = BertGenerationEncoder(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        # once with, once without the encoder attention mask
        __snake_case : str = model(
            lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
        __snake_case : Union[str, Any] = model(
            lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __snake_case ( self : int , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : str , **lowerCamelCase : List[str] , ) -> Optional[int]:
        """Check that a cached (past_key_values) forward matches the no-cache forward."""
        __snake_case : Tuple = True
        __snake_case : Tuple = True
        __snake_case : Tuple = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
        # first forward pass
        __snake_case : Optional[int] = model(
            lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
        __snake_case : Tuple = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        __snake_case : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __snake_case : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        __snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        __snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
        # full-sequence forward (no cache) vs. incremental forward (with cache)
        __snake_case : str = model(
            lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
        __snake_case : Any = model(
            lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
        # select random slice
        __snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        __snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
    def __snake_case ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , *lowerCamelCase : int , ) -> str:
        """Causal-LM head check: logits are (batch, seq, vocab)."""
        __snake_case : Optional[Any] = BertGenerationDecoder(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : Tuple = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __snake_case ( self : Dict ) -> Dict:
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        __snake_case , __snake_case , __snake_case , __snake_case : str = self.prepare_config_and_inputs()
        __snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """ModelTester-driven unit tests for the BertGeneration encoder/decoder models.

    NOTE(review): identifiers in this dump are machine-mangled (class named `a`,
    every test method named `__snake_case`, locals bound to `__snake_case` but read
    under upstream names) — the code is preserved byte-for-byte apart from docs.
    """

    # model / pipeline class registries, empty when torch is unavailable
    __UpperCAmelCase : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    __UpperCAmelCase : Dict = (BertGenerationDecoder,) if is_torch_available() else ()
    __UpperCAmelCase : Optional[int] = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def __snake_case ( self : str ) -> str:
        """Set up the model tester and the shared ConfigTester."""
        __snake_case : str = BertGenerationEncoderTester(self )
        __snake_case : int = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )

    def __snake_case ( self : Optional[Any] ) -> int:
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()

    def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
        """Basic encoder forward-pass test."""
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    def __snake_case ( self : List[Any] ) -> Dict:
        """Same forward test with the config's model_type overridden to "bert"."""
        __snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        __snake_case : Tuple = "bert"
        self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )

    def __snake_case ( self : Optional[Any] ) -> List[str]:
        """Decoder-mode (cross-attention) forward test."""
        __snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )

    def __snake_case ( self : int ) -> Union[str, Any]:
        """past_key_values caching consistency test."""
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )

    def __snake_case ( self : List[str] ) -> Optional[int]:
        """Decoder test with the encoder attention mask set to None."""
        # This regression test was failing with PyTorch < 1.3
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        __snake_case : Tuple = None
        self.model_tester.create_and_check_model_as_decoder(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )

    def __snake_case ( self : List[str] ) -> Optional[Any]:
        """Causal-LM head test for the decoder."""
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )

    @slow
    def __snake_case ( self : str ) -> Dict:
        """Smoke-test loading the pretrained encoder checkpoint from the Hub."""
        __snake_case : List[Any] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(lowerCamelCase )
@require_torch
class a (unittest.TestCase ):
    """Integration test: pretrained BertGenerationEncoder hidden states on a fixed input."""

    @slow
    def __snake_case ( self : List[Any] ) -> List[Any]:
        __snake_case : str = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        __snake_case : Tuple = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            __snake_case : Optional[int] = model(lowerCamelCase )[0]
        # expected output shape: (batch=1, seq_len=8, hidden=1024)
        __snake_case : Tuple = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , lowerCamelCase )
        # golden values for the top-left 3x3 slice of the hidden states
        __snake_case : int = torch.tensor(
            [[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class a (unittest.TestCase ):
    """Integration test: pretrained BertGenerationDecoder logits on a fixed input."""

    @slow
    def __snake_case ( self : Any ) -> Optional[Any]:
        __snake_case : Any = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        __snake_case : List[str] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            __snake_case : Optional[int] = model(lowerCamelCase )[0]
        # expected logits shape: (batch=1, seq_len=8, vocab=50358)
        __snake_case : Any = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , lowerCamelCase )
        # golden values for the top-left 3x3 slice of the logits
        __snake_case : int = torch.tensor(
            [[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 81 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger and the map of pretrained VAN checkpoints to config URLs.
# NOTE(review): both assignments bind the same mangled name `_snake_case` — in the
# upstream file these are `logger` and the pretrained-config-archive map; verify.
_snake_case : int = logging.get_logger(__name__)
_snake_case : str = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class a (_lowerCAmelCase ):
    """Configuration for the VAN (Visual Attention Network) model family.

    Stores per-stage architecture hyper-parameters (patch sizes, strides, hidden
    sizes, depths, MLP ratios) plus initialization/regularization settings.
    """

    # model_type identifier used by the auto classes
    __UpperCAmelCase : List[Any] = "van"

    def __init__( self : Optional[int] , lowerCamelCase : Any=224 , lowerCamelCase : str=3 , lowerCamelCase : Any=[7, 3, 3, 3] , lowerCamelCase : Dict=[4, 2, 2, 2] , lowerCamelCase : List[Any]=[64, 128, 320, 512] , lowerCamelCase : str=[3, 3, 12, 3] , lowerCamelCase : Dict=[8, 8, 4, 4] , lowerCamelCase : Any="gelu" , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Tuple=1E-6 , lowerCamelCase : Optional[int]=1E-2 , lowerCamelCase : int=0.0 , lowerCamelCase : Optional[Any]=0.0 , **lowerCamelCase : Optional[int] , ) -> int:
        super().__init__(**lowerCamelCase )
        # NOTE(review): dump-mangled — every attribute is stored on the name
        # `__snake_case` while the right-hand sides use the upstream names; verify.
        __snake_case : Union[str, Any] = image_size
        __snake_case : Any = num_channels
        __snake_case : Any = patch_sizes
        __snake_case : List[Any] = strides
        __snake_case : str = hidden_sizes
        __snake_case : Dict = depths
        __snake_case : Optional[int] = mlp_ratios
        __snake_case : Dict = hidden_act
        __snake_case : Union[str, Any] = initializer_range
        __snake_case : List[str] = layer_norm_eps
        __snake_case : Optional[int] = layer_scale_init_value
        __snake_case : List[Any] = drop_path_rate
        __snake_case : int = dropout_rate
| 81 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a (_lowerCAmelCase ):
    """Pre-tokenized LM dataset used for distillation.

    Holds a numpy array of token-id sequences plus a parallel array of lengths,
    and cleans the corpus at construction time: splits over-long sequences,
    drops very short ones, and drops sequences dominated by unknown tokens.

    NOTE(review): identifiers are machine-mangled — locals/attributes are bound to
    `__snake_case` but read back under their upstream names (data, max_len,
    indices, tk_, ...), so this class cannot run as dumped; kept verbatim.
    """

    def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Optional[int]:
        # first arg: run parameters; second arg: iterable of token-id sequences
        __snake_case : Optional[Any] = params
        __snake_case : Union[str, Any] = np.array(lowerCamelCase )
        __snake_case : List[Any] = np.array([len(lowerCamelCase ) for t in data] )
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]:
        """Return the (token_ids, length) pair at the given index."""
        return (self.token_ids[index], self.lengths[index])

    def __len__( self : Union[str, Any] ) -> Tuple:
        return len(self.lengths )

    def __snake_case ( self : Any ) -> int:
        """Invariant check: one length per sequence, each matching its sequence."""
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )

    def __snake_case ( self : Dict ) -> Dict:
        """Split sequences longer than max_model_input_size into boundary-token-framed chunks."""
        __snake_case : Optional[Any] = self.params.max_model_input_size
        __snake_case : str = self.lengths > max_len
        logger.info(F'Splitting {sum(lowerCamelCase )} too long sequences.' )

        def divide_chunks(lowerCamelCase : List[Any] , lowerCamelCase : Any ):
            # slice a sequence into consecutive windows of n tokens
            return [l[i : i + n] for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )]

        __snake_case : List[Any] = []
        __snake_case : Dict = []
        # MLM corpora are framed with CLS/SEP, CLM corpora with BOS/EOS
        if self.params.mlm:
            __snake_case , __snake_case : List[str] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            __snake_case , __snake_case : Optional[int] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                __snake_case : Union[str, Any] = []
                # max_len - 2 leaves room to re-insert the two boundary tokens
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        __snake_case : int = np.insert(lowerCamelCase , 0 , lowerCamelCase )
                    if sub_s[-1] != sep_id:
                        __snake_case : List[str] = np.insert(lowerCamelCase , len(lowerCamelCase ) , lowerCamelCase )
                    assert len(lowerCamelCase ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(lowerCamelCase )
                new_tok_ids.extend(lowerCamelCase )
                new_lengths.extend([len(lowerCamelCase ) for l in sub_seqs] )
        __snake_case : Tuple = np.array(lowerCamelCase )
        __snake_case : Union[str, Any] = np.array(lowerCamelCase )

    def __snake_case ( self : Optional[Any] ) -> int:
        """Drop sequences of 11 tokens or fewer."""
        __snake_case : Optional[Any] = len(self )
        __snake_case : List[Any] = self.lengths > 11
        __snake_case : Optional[Any] = self.token_ids[indices]
        __snake_case : List[Any] = self.lengths[indices]
        __snake_case : Any = len(self )
        logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )

    def __snake_case ( self : Tuple ) -> List[str]:
        """Drop sequences in which unknown tokens make up 50% or more."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            __snake_case : Optional[int] = self.params.special_tok_ids["unk_token"]
        __snake_case : Tuple = len(self )
        # per-sequence count of unknown-token occurrences
        __snake_case : Dict = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        __snake_case : Dict = (unk_occs / self.lengths) < 0.5
        __snake_case : Optional[int] = self.token_ids[indices]
        __snake_case : Optional[Any] = self.lengths[indices]
        __snake_case : List[str] = len(self )
        logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )

    def __snake_case ( self : Optional[Any] ) -> Optional[int]:
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(F'{len(self )} sequences' )
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def __snake_case ( self : str , lowerCamelCase : Optional[Any] ) -> str:
        """Collate a batch: pad every sequence to the batch max length, return tensors."""
        __snake_case : str = [t[0] for t in batch]
        __snake_case : str = [t[1] for t in batch]
        assert len(lowerCamelCase ) == len(lowerCamelCase )
        # Max for paddings
        __snake_case : List[Any] = max(lowerCamelCase )
        # Pad token ids
        if self.params.mlm:
            __snake_case : Tuple = self.params.special_tok_ids["pad_token"]
        else:
            __snake_case : str = self.params.special_tok_ids["unk_token"]
        __snake_case : int = [list(t.astype(lowerCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase )) for t in token_ids]
        assert len(tk_ ) == len(lowerCamelCase )
        assert all(len(lowerCamelCase ) == max_seq_len_ for t in tk_ )
        __snake_case : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_)
        __snake_case : List[str] = torch.tensor(lowerCamelCase ) # (bs)
        return tk_t, lg_t
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module-level logger for the TFRecord-shard preparation script.
_snake_case : Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
    """Parse command-line arguments for the TFRecord-shard preparation script.

    NOTE(review): dump-mangled — the parser is bound to `__snake_case` but used as
    `parser`, and `type=__lowerCamelCase` refers to an undefined name (presumably
    `str`/`int` upstream); verify before running.
    """
    __snake_case : int = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=__lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=__lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=__lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=__lowerCamelCase , default=1_0_0_0 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=__lowerCamelCase , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=__lowerCamelCase , type=__lowerCamelCase , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=__lowerCamelCase , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=__lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    __snake_case : List[str] = parser.parse_args()
    return args
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Build a tokenization closure for ``datasets.Dataset.map``.

    Args:
        __lowerCamelCase: a callable tokenizer (e.g. a ``PreTrainedTokenizer``);
            it is called on the ``text`` column of each mapped batch.

    Returns:
        A function taking a batch dict and returning the tokenizer's output for
        ``examples["text"]``.
    """

    def fn(examples):
        # Bug fix: the dumped inner function named its parameter `__lowerCamelCase`
        # (shadowing the tokenizer) and then referenced the undefined names
        # `tokenizer` and `examples`, raising NameError at call time. Bind the
        # batch as `examples` and call the tokenizer captured from the factory.
        return __lowerCamelCase(examples["text"])

    return fn
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Serialize tokenized examples into a list of tf.train.Example byte strings.

    NOTE(review): dump-mangled — `intaa_list`/`IntaaList` presumably stand for
    `int64_list`/`tf.train.Int64List`, and the argument is read as
    `tokenized_data` although it is named `__lowerCamelCase`; verify upstream.
    """
    __snake_case : Tuple = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        # one Feature per column, per example
        __snake_case : Tuple = {
            "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
        }
        __snake_case : List[Any] = tf.train.Features(feature=__lowerCamelCase )
        __snake_case : str = tf.train.Example(features=__lowerCamelCase )
        __snake_case : List[str] = example.SerializeToString()
        records.append(__lowerCamelCase )
    return records
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Load, tokenize, group into fixed-length chunks, and write TFRecord shards.

    NOTE(review): dump-mangled like the rest of this file — locals bind
    `__snake_case` but are read under upstream names (dataset, split_dir, ...).
    """
    __snake_case : Optional[int] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        __snake_case : Optional[Any] = min(len(__lowerCamelCase ) , args.limit )
        __snake_case : Dict = dataset.select(range(__lowerCamelCase ) )
        print(F'Limiting the dataset to {args.limit} entries.' )
    __snake_case : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        __snake_case : Dict = os.path.join(args.output_dir , args.split )
        if not os.path.exists(__lowerCamelCase ):
            os.makedirs(__lowerCamelCase )
    else:
        __snake_case : str = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    __snake_case : Any = tokenize_function(__lowerCamelCase )
    __snake_case : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=["text"] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(__lowerCamelCase ):
        # Concatenate all texts.
        __snake_case : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()}
        __snake_case : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        __snake_case : Any = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        __snake_case : int = {
            k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    __snake_case : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=1_0_0_0 , num_proc=4 )
    __snake_case : Optional[Any] = 0
    __snake_case : Optional[Any] = 0
    # write one TFRecord file per shard of `args.shard_size` grouped examples
    for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ):
        __snake_case : List[str] = grouped_dataset[shard : shard + args.shard_size]
        __snake_case : Any = len(dataset_snapshot["input_ids"] )
        __snake_case : List[Any] = os.path.join(__lowerCamelCase , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        __snake_case : Optional[Any] = get_serialized_examples(__lowerCamelCase )
        with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file:
            for i in range(len(__lowerCamelCase ) ):
                __snake_case : Union[str, Any] = serialized_examples[i]
                out_file.write(__lowerCamelCase )
            print("Wrote file {} containing {} records".format(__lowerCamelCase , __lowerCamelCase ) )
        shard_count += 1
        total_records += records_containing
    # record the total number of serialized examples for this split
    with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=__lowerCamelCase )
if __name__ == "__main__":
    # NOTE(review): both the arg parser and main were renamed `lowerCAmelCase_` in
    # this dump, and the parse result is bound to `_snake_case` but passed as
    # `args` — the entry point cannot run as dumped; verify against upstream.
    _snake_case : List[Any] = parse_args()
    main(args)
| 81 | 1 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ):
    """Session fixture: small in-memory `datasets.Dataset` with nested features.

    NOTE(review): names across these fixtures are dump-mangled (every fixture is
    `lowerCAmelCase_`, locals bind `__snake_case` but are read under upstream
    names, and `bza`/`lza`/`pyazr`/`sqlitea` stand for bz2/lz4/py7zr/sqlite3) —
    code kept verbatim.
    """
    __snake_case : Optional[Any] = 1_0
    __snake_case : List[Any] = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string" ) ),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string" ),
                    "answer_start": datasets.Value("int32" ),
                } ),
            "id": datasets.Value("int64" ),
        } )
    __snake_case : Any = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [9_7], "text": ["1976"]}] * 1_0,
            "id": list(range(__lowerCamelCase ) ),
        } , features=__lowerCamelCase , )
    return dataset
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: cache the dataset to an Arrow file and return its path."""
    __snake_case : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
    dataset.map(cache_file_name=__lowerCamelCase )
    return filename
# FILE_CONTENT + files
_snake_case : List[str] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: plain-text file containing FILE_CONTENT."""
    __snake_case : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt"
    __snake_case : Optional[Any] = FILE_CONTENT
    with open(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase )
    return filename
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: FILE_CONTENT compressed as a .txt.bz2 file."""
    import bza
    __snake_case : List[str] = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
    __snake_case : Optional[Any] = bytes(__lowerCamelCase , "utf-8" )
    with bza.open(__lowerCamelCase , "wb" ) as f:
        f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: FILE_CONTENT compressed as a .txt.gz file."""
    import gzip
    __snake_case : List[str] = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
    __snake_case : List[str] = bytes(__lowerCamelCase , "utf-8" )
    with gzip.open(__lowerCamelCase , "wb" ) as f:
        f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: FILE_CONTENT as .txt.lz4 (only when lz4 is available)."""
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame
        __snake_case : Any = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
        __snake_case : str = bytes(__lowerCamelCase , "utf-8" )
        with lza.frame.open(__lowerCamelCase , "wb" ) as f:
            f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: a .7z archive wrapping the text file (needs py7zr)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr
        __snake_case : int = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
        with pyazr.SevenZipFile(__lowerCamelCase , "w" ) as archive:
            archive.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: a .tar archive wrapping the text file."""
    import tarfile
    __snake_case : int = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
    with tarfile.TarFile(__lowerCamelCase , "w" ) as f:
        f.add(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: FILE_CONTENT compressed as a .txt.xz file."""
    import lzma
    __snake_case : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
    __snake_case : Any = bytes(__lowerCamelCase , "utf-8" )
    with lzma.open(__lowerCamelCase , "wb" ) as f:
        f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: a .zip archive wrapping the text file."""
    import zipfile
    __snake_case : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
    with zipfile.ZipFile(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: FILE_CONTENT as .txt.zst (only when zstandard is available)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        __snake_case : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
        __snake_case : Optional[Any] = bytes(__lowerCamelCase , "utf-8" )
        with zstd.open(__lowerCamelCase , "wb" ) as f:
            f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: small TMX (translation memory) XML file."""
    __snake_case : int = tmp_path_factory.mktemp("data" ) / "file.xml"
    __snake_case : Dict = textwrap.dedent(
        "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
    with open(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase )
    return filename
_snake_case : Optional[int] = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_snake_case : int = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_snake_case : List[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_snake_case : List[str] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_snake_case : Optional[int] = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ):
    """Session fixture: the module-level dict-of-lists sample data."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: the sample data cached as an Arrow dataset file."""
    __snake_case : Dict = datasets.Dataset.from_dict(__lowerCamelCase )
    __snake_case : int = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
    dataset.map(cache_file_name=__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: SQLite database with a `dataset` table filled from DATA."""
    __snake_case : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlitea.connect(__lowerCamelCase ) ) as con:
        __snake_case : Dict = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
        con.commit()
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: CSV file with the rows of DATA."""
    __snake_case : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
    with open(__lowerCamelCase , "w" , newline="" ) as f:
        __snake_case : Any = csv.DictWriter(__lowerCamelCase , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: second CSV file with the rows of DATA."""
    __snake_case : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
    with open(__lowerCamelCase , "w" , newline="" ) as f:
        __snake_case : str = csv.DictWriter(__lowerCamelCase , fieldnames=["col_1", "col_2", "col_3"] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: the CSV file recompressed as .csv.bz2."""
    import bza
    __snake_case : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
    with open(__lowerCamelCase , "rb" ) as f:
        __snake_case : Tuple = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(__lowerCamelCase , "wb" ) as f:
        f.write(__lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: zip archive containing both CSV files."""
    __snake_case : List[str] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
    with zipfile.ZipFile(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
        f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: zip archive with the CSVs stored under uppercase .CSV names."""
    __snake_case : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
    with zipfile.ZipFile(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
        f.write(__lowerCamelCase , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: zip archive with both CSVs nested under "main_dir/"."""
    __snake_case : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(__lowerCamelCase , "w" ) as f:
        f.write(__lowerCamelCase , arcname=os.path.join("main_dir" , os.path.basename(__lowerCamelCase ) ) )
        f.write(__lowerCamelCase , arcname=os.path.join("main_dir" , os.path.basename(__lowerCamelCase ) ) )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: Parquet file with the rows of DATA.

    NOTE(review): `intaa`/`floataa` are dump-mangled pyarrow type names
    (presumably int64/float64 upstream); verify.
    """
    __snake_case : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
    __snake_case : Tuple = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.intaa(),
            "col_3": pa.floataa(),
        } )
    with open(__lowerCamelCase , "wb" ) as f:
        __snake_case : List[str] = pq.ParquetWriter(__lowerCamelCase , schema=__lowerCamelCase )
        __snake_case : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__lowerCamelCase ) )] for k in DATA[0]} , schema=__lowerCamelCase )
        writer.write_table(__lowerCamelCase )
        writer.close()
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: JSON file with {"data": DATA}."""
    __snake_case : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    __snake_case : str = {"data": DATA}
    with open(__lowerCamelCase , "w" ) as f:
        json.dump(__lowerCamelCase , __lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: JSON file with {"data": DATA_DICT_OF_LISTS}."""
    __snake_case : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
    __snake_case : Tuple = {"data": DATA_DICT_OF_LISTS}
    with open(__lowerCamelCase , "w" ) as f:
        json.dump(__lowerCamelCase , __lowerCamelCase )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: JSON-Lines file with one row of DATA per line."""
    __snake_case : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
    with open(__lowerCamelCase , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(__lowerCamelCase ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: second JSON-Lines file with the rows of DATA."""
    __snake_case : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
    with open(__lowerCamelCase , "w" ) as f:
        for item in DATA:
            f.write(json.dumps(__lowerCamelCase ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: JSON-Lines file with the rows of DATA_312."""
    __snake_case : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
    with open(__lowerCamelCase , "w" ) as f:
        for item in DATA_312:
            f.write(json.dumps(__lowerCamelCase ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Session fixture: JSON-Lines file with the rows of DATA_STR."""
    __snake_case : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
    with open(__lowerCamelCase , "w" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(__lowerCamelCase ) + "\n" )
    return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
    """Session fixture: the text file recompressed as .txt.gz."""
    import gzip
    __snake_case : int = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
    with open(__lowerCamelCase , "rb" ) as orig_file:
        with gzip.open(__lowerCamelCase , "wb" ) as zipped_file:
            zipped_file.writelines(__lowerCamelCase )
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory, jsonl_path):
    """Session-scoped fixture: gzip-compress the ``jsonl_path`` fixture file and return the .gz path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(jsonl_path, jsonl2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive holding both jsonl fixture files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive with the inner jsonl zip nested under ``nested/``."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        # NOTE(review): the body writes a single member; assumed to be the inner zip — confirm.
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(jsonl_path, jsonl2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive with both jsonl files placed under ``main_dir/``."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(jsonl_path, jsonl2_path, tmp_path_factory):
    """Session-scoped fixture: tar archive holding both jsonl fixture files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Session-scoped fixture: tar archive with the inner jsonl tar nested under ``nested/``."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        # NOTE(review): the body adds a single member; assumed to be the inner tar — confirm.
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: plain text file with one digit per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: second plain text file with one digit per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: text file with an unsupported ``.abc`` extension (returned as a Path)."""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive with both text fixture files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive with both text files placed under ``main_dir/``."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(text_path, text2_path, tmp_path_factory):
    """Session-scoped fixture: zip archive whose members carry unsupported ``.ext`` extensions."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: UTF-8 text file containing a Unicode paragraph-separator "newline"."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_():
    """Path of the bundled RGB test image used by image-feature tests."""
    image_path = os.path.join("tests", "features", "data", "test_image_rgb.jpg")
    return image_path
@pytest.fixture(scope="session")
def lowerCAmelCase_():
    """Path of the bundled 44.1 kHz WAV test file used by audio-feature tests."""
    audio_path = os.path.join("tests", "features", "data", "test_audio_44100.wav")
    return audio_path
@pytest.fixture(scope="session")
def lowerCAmelCase_(image_file, tmp_path_factory):
    """Session-scoped fixture: zip archive containing the test image twice under distinct names."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: data directory with visible and hidden files/subdirectories.

    Layout: subdir/{train,test,.test}.txt plus a hidden .subdir/ with train/test files,
    used to test that hidden entries are skipped by data-file resolution.
    """
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 1_0)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 1_0)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 1_0)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 1_0)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 1_0)
    return data_dir
| 81 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Fixture file content/name; referenced below by the zstd/tmpfs fixtures and the fsspec cache test.
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session")
def lowerCAmelCase_(tmp_path_factory):
    """Session-scoped fixture: zstd-compressed copy of FILE_CONTENT; returns the .zstd path."""
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def lowerCAmelCase_(tmpfs):
    """Fixture: write FILE_CONTENT into the mock tmpfs filesystem and return its relative path."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def lowerCAmelCase_(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True must yield the original text content."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def lowerCAmelCase_(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Check which (cache dir, extracted dir) pair cached_path extracts into for each config combo."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def lowerCAmelCase_(text_file):
    """cached_path on a local file (absolute or relative) returns the file path unchanged."""
    # absolute path
    full_path = str(Path(text_file).resolve())
    assert cached_path(full_path) == text_file
    # relative path
    relative_path = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(relative_path) == text_file
def lowerCAmelCase_(tmp_path):
    """cached_path on a missing local file (absolute or relative) raises FileNotFoundError."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    relative_missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(relative_missing_file)
def lowerCAmelCase_(tmpfs_file):
    """get_from_cache on a tmp:// fsspec URL returns a file whose content matches FILE_CONTENT."""
    output_file = get_from_cache(F'tmp://{tmpfs_file}')
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def lowerCAmelCase_():
    """With offline mode forced on, cached_path on a URL raises OfflineModeIsEnabled."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def lowerCAmelCase_(tmp_path_factory):
    """With offline mode forced on, http_get/http_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def lowerCAmelCase_(tmp_path_factory):
    """With offline mode forced on, ftp_get/ftp_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def lowerCAmelCase_(tmp_path_factory):
    """With offline mode forced on, fsspec_get/fsspec_head raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 81 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_snake_case : str = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    """Export ``model`` to ONNX at ``output_path``, handling the torch<1.11 API difference.

    Args mirror ``torch.onnx.export``: ``ordered_input_names``/``output_names`` name the graph
    I/O, ``dynamic_axes`` marks variable dimensions, ``opset`` selects the operator set.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


# Backward-compatible alias for the old autogenerated name.
lowerCAmelCase_ = onnx_export
@torch.no_grad()
def convert_models(model_path, output_path, opset, fp16=False):
    """Convert the VAE decoder of a diffusers checkpoint at ``model_path`` to ONNX.

    Raises ValueError when fp16 export is requested without CUDA available.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 2_5, 2_5).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the VAE-decoder ONNX conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 81 |
# Adjacency list and vertex order used by the topological sort below.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort, graph=None, order=None):
    """Depth-first topological sort (reverse-postorder is the reversed result).

    Args:
        start: vertex to begin the DFS from.
        visited: mutable list of vertices already visited (shared across recursion).
        sort: mutable list accumulating vertices in postorder.
        graph: adjacency mapping; defaults to the module-level ``edges``.
        order: iterable of all vertices; defaults to the module-level ``vertices``.

    Returns the ``sort`` list (also mutated in place).
    """
    graph = edges if graph is None else graph
    order = vertices if order is None else order
    current = start
    # add current to visited
    visited.append(current)
    for neighbor in graph[current]:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort, graph, order)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(order):
        for vertex in order:
            if vertex not in visited:
                sort = topological_sort(vertex, visited, sort, graph, order)
    # return sort
    return sort


# Backward-compatible alias for the old autogenerated name.
lowerCAmelCase_ = topological_sort
if __name__ == "__main__":
    # Run the sort on the module-level graph and print the resulting order.
    sort = topological_sort("a", [], [])
    print(sort)
| 81 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments


_snake_case : Dict = logging.getLogger(__name__)


@dataclass
class a (_lowerCAmelCase ):
    """Training arguments for seq2seq fine-tuning, extending the base TrainingArguments.

    NOTE(review): field names/defaults reconstructed from the metadata help strings —
    the original file had all fields collapsed to a single name. Verify against callers.
    """

    # Label smoothing epsilon applied to the loss (0.0 disables it).
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    # Whether to use the sortish sampler for batching.
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    # Whether to run generation when computing generative metrics during eval.
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    # Whether to use the Adafactor optimizer instead of AdamW.
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    # The four dropout overrides below are copied into model.config when set.
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    # Learning-rate scheduler key, validated against arg_to_scheduler.
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 81 |
def solution():
    """Project Euler 9: return a*b*c for the Pythagorean triple (a, b, c) with a + b + c = 1000.

    Uses a generator with ``next`` so the search stops at the first (unique) triple
    instead of materializing the whole candidate list.
    """
    return next(
        a * b * (1_0_0_0 - a - b)
        for a in range(1, 9_9_9)
        for b in range(a, 9_9_9)  # b >= a avoids duplicate (a, b) orderings
        if a * a + b * b == (1_0_0_0 - a - b) ** 2
    )


# Backward-compatible alias for the old autogenerated name.
lowerCAmelCase_ = solution
if __name__ == "__main__":
    # Print the product for the a+b+c == 1000 Pythagorean triple (condition at the comprehension above).
    print(f'''{solution() = }''')
| 81 | 1 |
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training data into an evaluation-set file (questions) and a gold-data file (titles)."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # One tab-separated line of positive-context titles per question.
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    # Script entry point: parse CLI args and convert the DPR data files.
    main()
| 81 |
from math import sqrt
def solution(limit=1_0_0_0_0_0_0):
    """Project Euler 86: smallest cuboid size M such that the number of cuboids with
    integer shortest-path solutions, for sides up to M, first exceeds ``limit``.

    For each candidate M, sides (a, b, M) with a + b = sum_shortest_sides yield an
    integer path iff sqrt((a+b)^2 + M^2) is an integer; the inner expression counts
    the (a, b) splits of that sum in one step.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


# Backward-compatible alias for the old autogenerated name.
lowerCAmelCase_ = solution
if __name__ == "__main__":
    # Print the answer for the default limit of 1,000,000.
    print(f'''{solution() = }''')
| 81 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Build a YolosConfig for the given checkpoint variant and attach the COCO label maps.

    NOTE(review): the attribute targets below were erased by obfuscation and are
    reconstructed from the variant constants — verify against the upstream script.
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_9_2
        config.intermediate_size = 7_6_8
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 3
        config.image_size = [8_0_0, 1_3_3_3]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_3_0
        config.num_hidden_layers = 1_4
        config.num_attention_heads = 6
        config.intermediate_size = 1_3_2_0
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_0_0, 1_3_4_4]

    config.num_labels = 9_1
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def lowerCAmelCase_(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value entries.

    NOTE(review): the destination state-dict keys were erased by obfuscation and are
    reconstructed from the HF ViT layer layout — verify against the model's parameter names.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    """Map an original YOLOS checkpoint parameter name onto its HF equivalent."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite an original YOLOS state dict into HF naming, splitting fused qkv tensors.

    NOTE(review): the destination keys of the qkv split were erased by obfuscation and are
    reconstructed from the HF ViT layer layout — verify against the model's parameter names.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image and return it as a PIL Image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original YOLOS checkpoint into HF format, verify outputs, and save it.

    Raises ValueError for an unknown ``yolos_name``; asserts the converted model's
    logits/boxes match hard-coded expected slices for the known variants.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(F'Unknown yolos_name: {yolos_name}')

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the YOLOS checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a (_lowerCAmelCase ):
    """Config tester verifying that MobileViTConfig exposes the expected attributes."""

    def __snake_case ( self : str ) -> str:
        # Build a config from the stored kwargs and check the MobileViT-specific attributes exist.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class a :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple=13 , lowerCamelCase : str=32 , lowerCamelCase : Dict=2 , lowerCamelCase : List[str]=3 , lowerCamelCase : Any=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Tuple="silu" , lowerCamelCase : int=3 , lowerCamelCase : Dict=32 , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : int=None , ) -> str:
__snake_case : Optional[Any] = parent
__snake_case : Optional[Any] = batch_size
__snake_case : Any = image_size
__snake_case : List[Any] = patch_size
__snake_case : Any = num_channels
__snake_case : Union[str, Any] = last_hidden_size
__snake_case : Any = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Tuple = conv_kernel_size
__snake_case : Any = output_stride
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Optional[Any] = classifier_dropout_prob
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[int] = is_training
__snake_case : Dict = num_labels
__snake_case : Any = initializer_range
__snake_case : Optional[int] = scope
def __snake_case ( self : str ) -> Union[str, Any]:
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = None
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self : Any ) -> Union[str, Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
    """Run the bare MobileViTModel and check the feature-map shape.

    NOTE(review): the obfuscated `.to(lowerCamelCase)` / `model(lowerCamelCase)`
    arguments were restored to `torch_device` / `pixel_values`, matching the
    usual transformers test pattern — confirm against the upstream file.
    """
    model = MobileViTModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    # feature map is (batch, channels, H // output_stride, W // output_stride)
    self.parent.assertEqual(
        result.last_hidden_state.shape,
        (
            self.batch_size,
            self.last_hidden_size,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ),
    )
def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
    """Run the classification head and check the logits shape."""
    # The original assigned `self.num_labels` to a throwaway name; the config
    # must carry num_labels for the classification head.
    config.num_labels = self.num_labels
    model = MobileViTForImageClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(pixel_values, labels=labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
    """Run the segmentation head with and without labels and check the logits shape."""
    config.num_labels = self.num_labels
    model = MobileViTForSemanticSegmentation(config)
    model.to(torch_device)
    model.eval()

    # forward without labels
    result = model(pixel_values)
    self.parent.assertEqual(
        result.logits.shape,
        (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ),
    )

    # forward with per-pixel labels — logits shape must be unchanged
    result = model(pixel_values, labels=pixel_labels)
    self.parent.assertEqual(
        result.logits.shape,
        (
            self.batch_size,
            self.num_labels,
            self.image_size // self.output_stride,
            self.image_size // self.output_stride,
        ),
    )
def prepare_config_and_inputs_for_common(self):
    """Adapt `prepare_config_and_inputs` output to the (config, inputs_dict) format
    expected by the common model tests."""
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels, pixel_labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class a(_lowerCAmelCase, _lowerCAmelCase, unittest.TestCase):
    """Common-API test suite for MobileViT models.

    NOTE(review): in the original, every class attribute was named
    `__UpperCAmelCase` and every method `__snake_case`, so later definitions
    silently clobbered earlier ones and the test mixins could not find the
    attributes/tests they rely on.  Canonical names were restored from the
    attribute values and from the `self.model_tester.*` call sites.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # MobileViT is convolutional: no pruning / resizable embeddings / head masking / attentions.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        # NOTE(review): the original keyword values were lost in obfuscation;
        # restored to the conventional (config_class=..., has_text_modality=False).
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    NOTE(review): defined as `lowerCAmelCase_` in the original, but all three
    integration tests call `prepare_img()` — the canonical name is restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a(unittest.TestCase):
    """Slow integration tests for released MobileViT checkpoints.

    NOTE(review): duplicate `__snake_case` method names and lost assignment
    targets were restored to their canonical forms; the obfuscated
    `lowerCamelCase` device argument is assumed to be `torch_device`.
    """

    @cached_property
    def default_image_processor(self):
        # Only available when the vision extras (PIL etc.) are installed.
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.93_64, -1.23_27, -0.46_53]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # post-processing runs on CPU tensors
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 81 | 1 |
# Author credit carried over from the original LCG implementation.
_snake_case : Dict = "Tobias Carryer"
from time import time
class a:
    """A Linear Congruential Generator (LCG) pseudo-random number generator.

    Produces the sequence seed_{n+1} = (multiplier * seed_n + increment) % modulo.
    """

    def __init__(self, multiplier, increment, modulo, seed=None):
        """Initialize the generator.

        Args:
            multiplier: the LCG multiplier `a`.
            increment: the LCG increment `c`.
            modulo: the LCG modulus `m` (period is at most `m`).
            seed: starting state; defaults to the current Unix time.  A `None`
                sentinel is used instead of `int(time())` as a default so the
                seed is taken at call time, not at class-definition time.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        """Advance the generator one step and return the new state."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


# Backward-compatible canonical name: the demo block below instantiates
# `LinearCongruentialGenerator`, which the obfuscated class name `a` broke.
LinearCongruentialGenerator = a
if __name__ == "__main__":
    # Show the LCG in action: print an endless stream of pseudorandom numbers.
    # NOTE(review): the original bound the generator to a throwaway name but
    # used it as `lcg`, which raised NameError — the binding is restored.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
| 81 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_snake_case : int = logging.get_logger(__name__)
class a(_lowerCAmelCase):
    """Deprecated alias kept for backward compatibility; use ImageGPTImageProcessor.

    NOTE(review): the original signature was `*lowerCamelCase, **lowerCamelCase`
    (duplicate argument name — a SyntaxError), and the var-args tuple was
    passed to `warnings.warn` as its `category` argument.  The standard
    deprecation pattern with FutureWarning is restored.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Weight / checkpoint filenames.
# NOTE(review): originally every constant below was bound to the single name
# `_snake_case`, so all but the last binding were dead, and the two lines that
# read FEATURE_EXTRACTOR_NAME / SENTENCEPIECE_UNDERLINE raised NameError.
# Canonical transformers.utils constant names are restored.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"

# Config / processor / model-card filenames.
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

# Tokenizer / dummy-input constants.
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise ImportError when the installed transformers is older than *min_version*.

    NOTE(review): the original compared `version.parse(__lowerCamelCase)` with
    itself (the check could never fire) and referenced the undefined names
    `min_version` / `error_message`; the intended comparison against the
    package `__version__` is restored.
    """
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase_ = check_min_version
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
class a(_lowerCAmelCase):
    """Image processor that rescales pixel values and symmetric-pads images so
    that height and width become multiples of ``pad_size``.

    NOTE(review): duplicate `lowerCamelCase` parameter names (a SyntaxError),
    duplicate `__snake_case` method names, and lost `self.*` assignment
    targets were restored from the attribute read sites and the standard
    BaseImageProcessor `preprocess` contract.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (delegates to the module-level helper)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Symmetric-pad `image` on the bottom/right up to the next multiple of `size`.

        NOTE(review): pads up to the *next* multiple even when the dimension is
        already divisible by `size` (a full extra `size` is added) — kept as in
        the original.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Rescale and pad a batch of images, returning a BatchFeature."""
        # Per-call arguments override the instance-level defaults.
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 81 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a(_lowerCAmelCase, _lowerCAmelCase, unittest.TestCase):
    """Fast tests for the IF inpainting pipeline.

    NOTE(review): duplicate `__snake_case` method names clobbered each other;
    canonical names were restored from the mixin helpers each body delegates
    to (e.g. `_test_save_load_local` -> `test_save_load_local`).
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Provided by IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound torch.Generator objects.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2,
        )
| 81 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import machinery: maps submodule name -> public names exported from it.
# NOTE(review): the original bound this dict to a throwaway name but the
# _LazyModule call below reads `_import_structure`, and the torch-only model
# list was never attached to the dict — both restored here.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case : Optional[int] = 2
class a:
    """A mini reimplementation of fairseq's Dictionary: a bidirectional
    symbol <-> index mapping with occurrence counts.

    NOTE(review): the original keyword-only parameters all shared one name
    (a SyntaxError) and every `self.*` assignment target was lost; names were
    reconstructed from the read sites (`self.symbols`, `self.indices`,
    `self.count`, `self.unk_word`) and the fairseq Dictionary layout.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # number of special symbols at the front of the table
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        # out-of-range indices map to the unknown token
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a dictionary from a text file of ``<symbol> <count>`` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add `word` (or bump its count by `n`) and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # No metadata header in this format: symbols start at line 0.
        return 0

    def add_from_file(self, f):
        """Load symbols from a path or an open file of ``<symbol> <count>`` lines."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


# Backward-compatible canonical name: the conversion function below calls
# `Dictionary.load(...)`, which the obfuscated class name `a` broke.
Dictionary = a
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys for the HF tokenizer format.

    (1) remove word breaking symbol, (2) add word ending symbol where the word
    is not broken up, e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7}
    => {'le': 5, 'tt': 6, 'er</w>': 7}.  Special tokens keep their bare form.

    NOTE(review): defined as `lowerCAmelCase_` in the original although the
    conversion function calls `rewrite_dict_keys`; the re-add of each special
    token (`d2[k] = d[k]`) had also lost its assignment target.
    """
    d2 = dict(
        (re.sub(R"@@$", "", k), v) if k.endswith("@@") else (re.sub(R"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint directory into a HF Transformers dump.

    Reads ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` from
    `biogpt_checkpoint_path` and writes vocab, merges, model config, tokenizer
    config and renamed weights into `pytorch_dump_folder_path`.

    NOTE(review): the original signature used one duplicated parameter name
    (a SyntaxError) and most assignment targets / call arguments were lost in
    obfuscation; they were reconstructed from the surrounding reads and the
    standard BioGPT conversion flow.
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(F'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(F'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.0_2,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(F'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1_0_2_4,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(F'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq layers to their HF BioGPT equivalents
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to throwaway
    # names but then read `parser` / `args`, raising NameError — restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
    """Text-guided inpainting pipeline.

    A CLIPSeg model segments the region described by a text query, the logits
    are turned into a mask image, and a ``StableDiffusionInpaintPipeline``
    assembled from this pipeline's own components fills the masked region.

    NOTE(review): throughout this class assignments bind throwaway annotated
    locals (``__snake_case``) while later statements read names such as
    ``scheduler``, ``outputs`` and ``inpainting_pipeline``, and every parameter
    is literally named ``lowerCamelCase`` (duplicate parameter names do not
    parse) — this looks like mangled codegen. Compare against the upstream
    diffusers community "text_inpainting" pipeline before relying on behavior.
    """
    # Constructor: patches deprecated scheduler config fields, warns when the
    # safety checker is disabled, then registers all sub-modules.
    def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
        super().__init__()
        # Deprecation shim: old scheduler configs used steps_offset != 1.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            __snake_case : Tuple = (
                F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __snake_case : Any = dict(scheduler.config )
            __snake_case : List[Any] = 1
            __snake_case : Tuple = FrozenDict(lowerCamelCase )
        # Deprecation shim: old PNDM configs did not set skip_prk_steps.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            __snake_case : List[str] = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __snake_case : List[str] = dict(scheduler.config )
            __snake_case : List[str] = True
            __snake_case : Any = FrozenDict(lowerCamelCase )
        if safety_checker is None:
            logger.warning(
                F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
    # Enable sliced attention computation on the UNet to reduce peak memory.
    def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case : Any = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase )
    # Disable attention slicing (delegates to the enable method above).
    def __snake_case ( self : List[Any] ) -> Any:
        self.enable_attention_slicing(lowerCamelCase )
    # Offload all sub-models to CPU via accelerate, moving them to GPU lazily.
    def __snake_case ( self : Optional[Any] ) -> str:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        __snake_case : Optional[int] = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCamelCase , lowerCamelCase )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self : int ) -> Any:
        # Resolve the device hooks placed by accelerate's cpu_offload, if any.
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCamelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
        # Segment the region named by `text`, threshold it into a PIL mask.
        __snake_case : Tuple = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        __snake_case : str = self.segmentation_model(**lowerCamelCase )
        __snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        __snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        __snake_case : Tuple = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
| 81 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if *number* is prime.

    Trial division up to ``isqrt(number)``; numbers below 2 are not prime
    (the previous version returned True for 0 and 1).
    """
    if number < 2:
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below *max_prime* expressible as a difference of consecutive cubes.

    Consecutive cube differences are (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1,
    i.e. 7, 19, 37, 61, ...; each step adds 6 * n to the previous candidate,
    which is what the running ``prime_candidate += 6 * cube_index`` computes.

    >>> solution(100)
    4
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first cube difference
    while prime_candidate < max_prime:
        # bool is a subclass of int, so this adds 1 for primes, 0 otherwise.
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
    # Print the count for the default limit (10**6) when run as a script.
    print(f'''{solution() = }''')
| 81 |
class a:
    """Disjoint-set (union-find) structure over pre-weighted sets.

    ``set_counts[i]`` is the initial size of element *i*'s set.  ``merge``
    performs union by rank and keeps ``max_set`` equal to the largest set
    size produced so far; ``get_parent`` is find with path compression.

    The previous version bound all state to throwaway annotated locals
    (``__snake_case``) and declared ``merge`` with two parameters of the
    same name (a SyntaxError); this restores the attributes and parameter
    names that the surviving code (``self.get_parent``, ``self.ranks``,
    ``self.parents``, ``self.set_counts``, ``self.max_set``) reads.
    """

    def __init__(self, set_counts: list) -> None:
        """Create one singleton set per entry of *set_counts*."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        # Every element starts as its own root.
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing *src* and *dst*.

        Returns False when they already share a root, True otherwise.  The
        absorbed root's count is zeroed and added to the surviving root's,
        and ``max_set`` is updated with the joined size.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # dst's tree is at least as tall: attach src under dst.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # src's tree is taller: attach dst under src.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of *disj_set*, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set  # found the root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 81 | 1 |
def and_gate(input_a: int, input_b: int) -> int:
    """Logical AND gate: return 1 only when both inputs are non-zero, else 0.

    The previous version declared two parameters with the same name (a
    SyntaxError) and counted one input twice; both are fixed here, and the
    name matches the call sites below.

    >>> and_gate(1, 1)
    1
    >>> and_gate(0, 1)
    0
    """
    # The gate output is 0 exactly when at least one input is 0.
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    # Sanity-check the gate, then print its full truth table.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 81 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a (unittest.TestCase ):
    """Helper that builds ConditionalDETR image-processor configs and computes
    the resized height/width the processor is expected to produce.

    NOTE(review): assignments bind throwaway locals (``__snake_case``) while
    later methods read ``self.size``, ``self.do_resize`` etc., and ``__init__``
    declares many parameters all literally named ``lowerCamelCase`` (duplicate
    parameter names do not parse) — mangled codegen; compare with the upstream
    transformers ConditionalDetrImageProcessingTester before trusting it.
    """
    def __init__( self : int , lowerCamelCase : Any , lowerCamelCase : int=7 , lowerCamelCase : str=3 , lowerCamelCase : Optional[int]=30 , lowerCamelCase : Dict=400 , lowerCamelCase : str=True , lowerCamelCase : str=None , lowerCamelCase : Any=True , lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase : List[Any]=[0.5, 0.5, 0.5] , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=1 / 255 , lowerCamelCase : Any=True , ) -> str:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __snake_case : Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        __snake_case : Optional[Any] = parent
        __snake_case : List[Any] = batch_size
        __snake_case : Optional[int] = num_channels
        __snake_case : str = min_resolution
        __snake_case : int = max_resolution
        __snake_case : int = do_resize
        __snake_case : Tuple = size
        __snake_case : Any = do_normalize
        __snake_case : int = image_mean
        __snake_case : Tuple = image_std
        __snake_case : Dict = do_rescale
        __snake_case : Optional[Any] = rescale_factor
        __snake_case : str = do_pad
    # Export the stored settings as the kwargs dict for the image processor.
    def __snake_case ( self : Any ) -> int:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    # Compute the (height, width) the shortest-edge resize should yield; for a
    # batch, returns the per-axis maxima across images (padding target).
    def __snake_case ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=False ) -> List[str]:
        if not batched:
            __snake_case : Dict = image_inputs[0]
            if isinstance(lowerCamelCase , Image.Image ):
                __snake_case , __snake_case : Dict = image.size
            else:
                __snake_case , __snake_case : List[str] = image.shape[1], image.shape[2]
            if w < h:
                __snake_case : Optional[int] = int(self.size["shortest_edge"] * h / w )
                __snake_case : int = self.size["shortest_edge"]
            elif w > h:
                __snake_case : List[str] = self.size["shortest_edge"]
                __snake_case : Optional[Any] = int(self.size["shortest_edge"] * w / h )
            else:
                __snake_case : List[Any] = self.size["shortest_edge"]
                __snake_case : Any = self.size["shortest_edge"]
        else:
            __snake_case : int = []
            for image in image_inputs:
                __snake_case , __snake_case : List[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __snake_case : str = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
            __snake_case : str = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
    """Test suite for ``ConditionalDetrImageProcessor``: config round-trips,
    PIL/numpy/torch batching, and slow integration checks against COCO
    detection and panoptic annotation fixtures.

    NOTE(review): as elsewhere in this file, assignments bind throwaway locals
    (``__snake_case``) while later statements read names such as
    ``image_processing`` and ``encoding`` — mangled codegen; compare with the
    upstream transformers test before trusting behavior.
    """
    __UpperCAmelCase : str = ConditionalDetrImageProcessor if is_vision_available() else None
    # Build the shared tester helper for every test case.
    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        __snake_case : Optional[Any] = ConditionalDetrImageProcessingTester(self )
    @property
    def __snake_case ( self : Any ) -> str:
        return self.image_processor_tester.prepare_image_processor_dict()
    # The image processor exposes all expected config attributes.
    def __snake_case ( self : Optional[Any] ) -> Optional[int]:
        __snake_case : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "size" ) )
    # from_dict honours explicit size/max_size/pad overrides.
    def __snake_case ( self : Any ) -> Dict:
        __snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
        __snake_case : str = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> Dict:
        pass
    def __snake_case ( self : Tuple ) -> str:
        # Initialize image_processing
        __snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        __snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self : int ) -> str:
        # Initialize image_processing
        __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )
        # Test not batched input
        __snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self : int ) -> List[str]:
        # Initialize image_processing
        __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )
        # Test not batched input
        __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : List[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __snake_case : int = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        __snake_case , __snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def __snake_case ( self : Any ) -> Optional[int]:
        # prepare image and target
        __snake_case : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            __snake_case : str = json.loads(f.read() )
        __snake_case : List[Any] = {"image_id": 39769, "annotations": target}
        # encode them
        __snake_case : List[str] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        __snake_case : List[str] = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        __snake_case : Tuple = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        __snake_case : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
        # verify area
        __snake_case : List[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        __snake_case : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        __snake_case : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
        # verify image_id
        __snake_case : Optional[Any] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        __snake_case : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        __snake_case : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify orig_size
        __snake_case : int = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        __snake_case : Tuple = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
    @slow
    def __snake_case ( self : str ) -> Tuple:
        # prepare image, target and masks_path
        __snake_case : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            __snake_case : str = json.loads(f.read() )
        __snake_case : str = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        __snake_case : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        __snake_case : int = ConditionalDetrImageProcessor(format="coco_panoptic" )
        __snake_case : str = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        __snake_case : List[str] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        __snake_case : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1E-4 ) )
        # verify area
        __snake_case : Any = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        __snake_case : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        __snake_case : Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1E-3 ) )
        # verify image_id
        __snake_case : Tuple = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        __snake_case : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        __snake_case : int = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify masks
        __snake_case : List[Any] = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
        # verify orig_size
        __snake_case : List[str] = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        __snake_case : Any = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 81 | 1 |
def solution(n: int = 1000000) -> int:
    """Return the starting number below *n* that produces the longest Collatz chain.

    Chain lengths are memoised in ``counters`` (keyed by starting number, with
    ``counters[1] == 1``) so each tail is walked at most once — Project Euler
    problem 14.  The previous version bound every piece of state to throwaway
    annotated locals, so the loop read undefined names; this restores the
    names the logic plainly uses (``counters``, ``counter``, ``number``,
    ``pre_counter``, ``largest_number``).

    >>> solution(10)
    9
    """
    largest_number = 1
    pre_counter = 1  # length of the longest chain found so far
    counters = {1: 1}
    for start in range(2, n):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Reached a number whose chain length is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # Read the limit from stdin and print the longest-chain starting number.
    print(solution(int(input().strip())))
| 81 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# Checkpoint names of publicly available UperNet models on the Hub.
_snake_case : Optional[Any] = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_snake_case : Dict = "UperNetConfig"
class a (nn.Module ):
    """Conv -> BatchNorm -> ReLU building block used throughout UperNet.

    NOTE(review): ``__init__`` binds throwaway locals while ``forward`` reads
    ``self.conv``/``self.batch_norm``/``self.activation``, and the parameters
    share one name — mangled codegen (``nn.Convad``/``nn.BatchNormad`` are
    presumably ``nn.Conv2d``/``nn.BatchNorm2d``). TODO confirm upstream.
    """
    def __init__( self : Tuple , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Union[int, Tuple[int, int]] , lowerCamelCase : Union[int, Tuple[int, int], str] = 0 , lowerCamelCase : bool = False , lowerCamelCase : Union[int, Tuple[int, int]] = 1 , ) -> None:
        super().__init__()
        __snake_case : Union[str, Any] = nn.Convad(
            in_channels=lowerCamelCase , out_channels=lowerCamelCase , kernel_size=lowerCamelCase , padding=lowerCamelCase , bias=lowerCamelCase , dilation=lowerCamelCase , )
        __snake_case : Dict = nn.BatchNormad(lowerCamelCase )
        __snake_case : List[Any] = nn.ReLU()
    # Apply conv, then batch norm, then ReLU.
    def __snake_case ( self : List[Any] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        __snake_case : Dict = self.conv(lowerCamelCase )
        __snake_case : int = self.batch_norm(lowerCamelCase )
        __snake_case : Optional[Any] = self.activation(lowerCamelCase )
        return output
class a (nn.Module ):
    """One pyramid-pooling branch: adaptive average pool to a fixed scale
    followed by a 1x1 conv module.

    NOTE(review): mangled codegen — the layer list is bound to a throwaway
    local although the loops read ``self.layers``. TODO confirm upstream.
    """
    def __init__( self : str , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ) -> None:
        super().__init__()
        __snake_case : Tuple = [
            nn.AdaptiveAvgPoolad(lowerCamelCase ),
            UperNetConvModule(lowerCamelCase , lowerCamelCase , kernel_size=1 ),
        ]
        # Register each sub-layer under its index so parameters are tracked.
        for i, layer in enumerate(self.layers ):
            self.add_module(str(lowerCamelCase ) , lowerCamelCase )
    # Run the input through pool then conv, sequentially.
    def __snake_case ( self : Dict , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        __snake_case : List[str] = input
        for layer in self.layers:
            __snake_case : Tuple = layer(lowerCamelCase )
        return hidden_state
class a (nn.Module ):
    """Pyramid Pooling Module (PSPNet-style): one pooling block per scale,
    each output upsampled back to the input's spatial size.

    NOTE(review): mangled codegen — state is bound to throwaway locals while
    the methods read ``self.blocks``/``self.align_corners``. TODO confirm
    against the upstream transformers UperNet implementation.
    """
    def __init__( self : Any , lowerCamelCase : Tuple[int, ...] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : bool ) -> None:
        super().__init__()
        __snake_case : Dict = pool_scales
        __snake_case : List[str] = align_corners
        __snake_case : List[Any] = in_channels
        __snake_case : str = channels
        __snake_case : Optional[Any] = []
        for i, pool_scale in enumerate(lowerCamelCase ):
            __snake_case : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase , in_channels=lowerCamelCase , channels=lowerCamelCase )
            self.blocks.append(lowerCamelCase )
            self.add_module(str(lowerCamelCase ) , lowerCamelCase )
    # Apply every pooling block and bilinearly upsample each result to x's size.
    def __snake_case ( self : int , lowerCamelCase : torch.Tensor ) -> List[torch.Tensor]:
        __snake_case : Tuple = []
        for ppm in self.blocks:
            __snake_case : Any = ppm(lowerCamelCase )
            __snake_case : List[Any] = nn.functional.interpolate(
                lowerCamelCase , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
            ppm_outs.append(lowerCamelCase )
        return ppm_outs
class a (nn.Module ):
    """UperNet decode head: PSP module on the deepest feature map plus an FPN
    over the shallower ones, fused and classified per pixel.

    NOTE(review): mangled codegen — assignments bind throwaway locals while
    later statements read ``self.psp_modules``, ``self.bottleneck``,
    ``self.lateral_convs``, ``self.fpn_convs``, ``self.fpn_bottleneck`` and
    ``self.classifier``. TODO confirm against upstream.
    """
    def __init__( self : int , lowerCamelCase : str , lowerCamelCase : str ) -> List[str]:
        super().__init__()
        __snake_case : Dict = config
        __snake_case : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
        __snake_case : Tuple = in_channels
        __snake_case : str = config.hidden_size
        __snake_case : List[str] = False
        __snake_case : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        __snake_case : Tuple = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        __snake_case : List[str] = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        __snake_case : List[Any] = nn.ModuleList()
        __snake_case : Dict = nn.ModuleList()
        for in_channels in self.in_channels[:-1]: # skip the top layer
            __snake_case : Union[str, Any] = UperNetConvModule(lowerCamelCase , self.channels , kernel_size=1 )
            __snake_case : Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowerCamelCase )
            self.fpn_convs.append(lowerCamelCase )
        __snake_case : int = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    # Initialise all sub-module weights via _init_weights below.
    def __snake_case ( self : List[str] ) -> Optional[Any]:
        self.apply(self._init_weights )
    # Normal-init conv weights, zero-init biases.
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> str:
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    # PSP branch: pool the deepest feature map at several scales and fuse.
    def __snake_case ( self : List[Any] , lowerCamelCase : Tuple ) -> Optional[int]:
        __snake_case : str = inputs[-1]
        __snake_case : int = [x]
        psp_outs.extend(self.psp_modules(lowerCamelCase ) )
        __snake_case : Tuple = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Union[str, Any] = self.bottleneck(lowerCamelCase )
        return output
    def __snake_case ( self : int , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # build laterals
        __snake_case : Any = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowerCamelCase ) )
        # build top-down path
        __snake_case : Dict = len(lowerCamelCase )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            __snake_case : Union[str, Any] = laterals[i - 1].shape[2:]
            __snake_case : Optional[int] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowerCamelCase , mode="bilinear" , align_corners=self.align_corners )
        # build outputs
        __snake_case : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            __snake_case : Tuple = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
        __snake_case : str = torch.cat(lowerCamelCase , dim=1 )
        __snake_case : Optional[Any] = self.fpn_bottleneck(lowerCamelCase )
        __snake_case : Tuple = self.classifier(lowerCamelCase )
        return output
class a (nn.Module ):
    """FCN auxiliary head applied to one intermediate backbone feature map
    (deep supervision during training).

    NOTE(review): mangled codegen — assignments bind throwaway locals while
    later code reads ``self.convs``, ``self.conv_cat`` and ``self.classifier``.
    TODO confirm against upstream.
    """
    def __init__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int = 2 , lowerCamelCase : int = 3 , lowerCamelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        __snake_case : List[Any] = config
        __snake_case : List[str] = config.auxiliary_in_channels
        __snake_case : List[Any] = config.auxiliary_channels
        __snake_case : Tuple = config.auxiliary_num_convs
        __snake_case : int = config.auxiliary_concat_input
        __snake_case : Optional[int] = in_index
        __snake_case : Tuple = (kernel_size // 2) * dilation
        __snake_case : Optional[int] = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowerCamelCase , padding=lowerCamelCase , dilation=lowerCamelCase ) )
        if self.num_convs == 0:
            __snake_case : Union[str, Any] = nn.Identity()
        else:
            __snake_case : Any = nn.Sequential(*lowerCamelCase )
        if self.concat_input:
            __snake_case : int = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowerCamelCase , padding=kernel_size // 2 )
        __snake_case : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    # Initialise all sub-module weights via _init_weights below.
    def __snake_case ( self : Dict ) -> Optional[Any]:
        self.apply(self._init_weights )
    # Normal-init conv weights, zero-init biases.
    def __snake_case ( self : Tuple , lowerCamelCase : Tuple ) -> Optional[int]:
        if isinstance(lowerCamelCase , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def __snake_case ( self : Optional[int] , lowerCamelCase : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        __snake_case : List[str] = encoder_hidden_states[self.in_index]
        __snake_case : Optional[Any] = self.convs(lowerCamelCase )
        if self.concat_input:
            # Optionally re-concatenate the raw input features before classifying.
            __snake_case : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        __snake_case : Union[str, Any] = self.classifier(lowerCamelCase )
        return output
class a (_lowerCAmelCase ):
    """UperNet pre-trained model stub: config class, input name, and weight /
    gradient-checkpointing hooks.

    NOTE(review): the three ``__UpperCAmelCase`` class attributes reuse one
    name, so only the last assignment survives — mangled codegen; upstream
    these are presumably ``config_class``, ``main_input_name`` and
    ``supports_gradient_checkpointing``. TODO confirm.
    """
    __UpperCAmelCase : Optional[Any] = UperNetConfig
    __UpperCAmelCase : int = "pixel_values"
    __UpperCAmelCase : str = True
    # Per-module weight init: delegate to each sub-model's own init_weights.
    def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ) -> List[Any]:
        if isinstance(lowerCamelCase , lowerCamelCase ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    # Initialise backbone and both heads.
    def __snake_case ( self : Optional[Any] ) -> List[str]:
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    # Toggle gradient checkpointing on matching sub-modules.
    def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=False ) -> Dict:
        if isinstance(lowerCamelCase , lowerCamelCase ):
            __snake_case : Union[str, Any] = value
_snake_case : Dict = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case : Tuple = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , _lowerCAmelCase , )
class a (_lowerCAmelCase ):
    """simple docstring"""

    def __init__( self : Dict , lowerCamelCase : int ) -> Optional[int]:
        # Build the backbone from the nested backbone config, then the main
        # decode head (UperNetHead) and optional auxiliary FCN head.
        super().__init__(lowerCamelCase )
        __snake_case : Any = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        __snake_case : Union[str, Any] = UperNetHead(lowerCamelCase , in_channels=self.backbone.channels )
        __snake_case : Any = UperNetFCNHead(lowerCamelCase ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
    @replace_return_docstrings(output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC )
    def __snake_case ( self : Union[str, Any] , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[torch.Tensor] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        """Forward pass: backbone features -> decode head -> logits upsampled to input size.

        NOTE(review): assignments throughout this body target `__snake_case`
        while later statements read names such as `outputs`, `logits`,
        `labels`, `loss`, `return_dict` — the locals are inconsistent after
        obfuscation; confirm every step against the upstream
        UperNetForSemanticSegmentation.forward before relying on it.
        """
        # Resolve per-call overrides against the config defaults.
        __snake_case : Any = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __snake_case : str = output_attentions if output_attentions is not None else self.config.output_attentions
        __snake_case : Tuple = self.backbone.forward_with_filtered_kwargs(
            lowerCamelCase , output_hidden_states=lowerCamelCase , output_attentions=lowerCamelCase )
        __snake_case : List[Any] = outputs.feature_maps
        __snake_case : List[Any] = self.decode_head(lowerCamelCase )
        # Upsample the decode-head logits back to the input resolution.
        __snake_case : List[str] = nn.functional.interpolate(lowerCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowerCamelCase )
        __snake_case : Optional[int] = None
        if self.auxiliary_head is not None:
            __snake_case : Dict = self.auxiliary_head(lowerCamelCase )
            __snake_case : Dict = nn.functional.interpolate(
                lowerCamelCase , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowerCamelCase )
        __snake_case : int = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one" )
            else:
                # compute weighted loss
                __snake_case : Any = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                __snake_case : Union[str, Any] = loss_fct(lowerCamelCase , lowerCamelCase )
                __snake_case : Optional[Any] = loss_fct(lowerCamelCase , lowerCamelCase )
                __snake_case : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            # Tuple output: drop loss-unrelated entries depending on whether
            # hidden states were requested.
            if output_hidden_states:
                __snake_case : Any = (logits,) + outputs[1:]
            else:
                __snake_case : str = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 81 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for the tokenizer.
_snake_case : Dict = logging.get_logger(__name__)
# Expected file names inside a saved tokenizer directory.
_snake_case : Optional[Any] = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
# Download URLs for the released PhoBERT checkpoints' vocab/merges files.
_snake_case : str = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
# Maximum positional-embedding length per checkpoint.
_snake_case : int = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return the set of adjacent symbol pairs in a word.

    A word is any non-empty sequence of symbols (a string of characters or a
    tuple of BPE sub-tokens). For ``"abc"`` the result is
    ``{("a", "b"), ("b", "c")}``; a single-symbol word yields an empty set.

    Args:
        __lowerCamelCase: non-empty sequence of symbols.

    Returns:
        set[tuple]: every pair of consecutive symbols.
    """
    # Bug fix: the previous version bound its locals to `__snake_case` while
    # reading `word`, `pairs` and `prev_char`, so every call raised NameError.
    # Restored to one consistent set of local names; the redundant trailing
    # `pairs = set(pairs)` (a set copied into itself) was dropped.
    pairs = set()
    prev_char = __lowerCamelCase[0]
    for char in __lowerCamelCase[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class a (_lowerCAmelCase ):
    """simple docstring"""

    # PhoBERT-style tokenizer: word-level vocab file plus BPE merge codes.
    # NOTE(review): every method below is named `__snake_case`, so later
    # definitions shadow earlier ones on the class; likewise many assignments
    # target `__snake_case` while later lines read the original local names
    # (`merges`, `word`, `pairs`, `line`, ...). Confirm against the upstream
    # PhobertTokenizer before trusting runtime behavior.
    __UpperCAmelCase : int = VOCAB_FILES_NAMES
    __UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Union[str, Any]="<s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Union[str, Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[Any]="<unk>" , lowerCamelCase : int="<pad>" , lowerCamelCase : Optional[Any]="<mask>" , **lowerCamelCase : List[str] , ) -> str:
        # Register special tokens with the base tokenizer class.
        super().__init__(
            bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , **lowerCamelCase , )
        __snake_case : Optional[Any] = vocab_file
        __snake_case : Tuple = merges_file
        # Seed the encoder with the four fixed special-token ids (0..3).
        __snake_case : Optional[Any] = {}
        __snake_case : Dict = 0
        __snake_case : Optional[int] = 1
        __snake_case : Optional[Any] = 2
        __snake_case : str = 3
        self.add_from_file(lowerCamelCase )
        __snake_case : Union[str, Any] = {v: k for k, v in self.encoder.items()}
        # Load BPE merge rules and rank them by their order in the file.
        with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
            __snake_case : Union[str, Any] = merges_handle.read().split("\n" )[:-1]
        __snake_case : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        __snake_case : Optional[int] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
        # Per-token BPE memoization cache.
        __snake_case : Tuple = {}

    def __snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
        # Build model inputs: <s> A </s> for one sequence,
        # <s> A </s></s> B </s> for a pair.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __snake_case : List[str] = [self.cls_token_id]
        __snake_case : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __snake_case ( self : Optional[int] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
        # 1 marks a special token, 0 a sequence token.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase )) + [1]
        return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]

    def __snake_case ( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
        # PhoBERT does not use token-type ids: the mask is all zeros.
        __snake_case : int = [self.sep_token_id]
        __snake_case : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
        # Vocabulary size (special tokens included).
        return len(self.encoder )

    def __snake_case ( self : Dict ) -> int:
        # Full vocab including tokens added after loading.
        return dict(self.encoder , **self.added_tokens_encoder )

    def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Dict:
        # Apply BPE merges to a single token, memoized in `self.cache`.
        if token in self.cache:
            return self.cache[token]
        __snake_case : int = tuple(lowerCamelCase )
        # Mark the last symbol with the end-of-word suffix.
        __snake_case : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __snake_case : List[Any] = get_pairs(lowerCamelCase )
        if not pairs:
            return token
        while True:
            # Always merge the lowest-ranked (earliest-learned) pair first.
            __snake_case : Tuple = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __snake_case , __snake_case : str = bigram
            __snake_case : Optional[int] = []
            __snake_case : Optional[int] = 0
            while i < len(lowerCamelCase ):
                try:
                    __snake_case : List[Any] = word.index(lowerCamelCase , lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __snake_case : Union[str, Any] = j
                if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __snake_case : List[Any] = tuple(lowerCamelCase )
            __snake_case : int = new_word
            if len(lowerCamelCase ) == 1:
                break
            else:
                __snake_case : Any = get_pairs(lowerCamelCase )
        # Join sub-words with the continuation marker and strip "</w>".
        __snake_case : Any = "@@ ".join(lowerCamelCase )
        __snake_case : int = word[:-4]
        __snake_case : Optional[Any] = word
        return word

    def __snake_case ( self : int , lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        # Whitespace-split the text and BPE-encode each word.
        __snake_case : List[str] = []
        __snake_case : Any = re.findall(R"\S+\n?" , lowerCamelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(lowerCamelCase ).split(" " ) ) )
        return split_tokens

    def __snake_case ( self : int , lowerCamelCase : List[Any] ) -> List[Any]:
        # Token string -> id, falling back to the unknown-token id.
        return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )

    def __snake_case ( self : List[str] , lowerCamelCase : Tuple ) -> int:
        # Id -> token string, falling back to the unknown token.
        return self.decoder.get(lowerCamelCase , self.unk_token )

    def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] ) -> str:
        # Undo BPE: join tokens and remove the "@@ " continuation markers.
        __snake_case : int = " ".join(lowerCamelCase ).replace("@@ " , "" ).strip()
        return out_string

    def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
        # Copy the vocab and merges files into `save_directory`.
        if not os.path.isdir(lowerCamelCase ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __snake_case : List[Any] = os.path.join(
            lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __snake_case : List[str] = os.path.join(
            lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
            copyfile(self.vocab_file , lowerCamelCase )
        if os.path.abspath(self.merges_file ) != os.path.abspath(lowerCamelCase ):
            copyfile(self.merges_file , lowerCamelCase )
        return out_vocab_file, out_merge_file

    def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[int] ) -> int:
        # Populate `self.encoder` from a fairseq-style "<token> <count>" file;
        # accepts either a path (str) or an open file object and recurses once
        # to normalize the former into the latter.
        if isinstance(lowerCamelCase , lowerCamelCase ):
            try:
                with open(lowerCamelCase , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(lowerCamelCase )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        __snake_case : List[Any] = f.readlines()
        for lineTmp in lines:
            __snake_case : int = lineTmp.strip()
            # The count is everything after the last space.
            __snake_case : str = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            __snake_case : List[Any] = line[:idx]
            __snake_case : Optional[int] = len(self.encoder )
| 81 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return the maximum sum of non-adjacent elements of ``__lowerCamelCase``.

    Classic "house robber" dynamic programming: at each element keep the best
    sum that includes it and the best sum that excludes it.

    Args:
        __lowerCamelCase: list of numbers (may be empty).

    Returns:
        The largest obtainable sum of pairwise non-adjacent elements; 0 for an
        empty list (and for all-negative input, since skipping everything
        yields 0).
    """
    # Bug fix: the previous version read the undefined names `nums` and
    # `max_excluding`, computed `max(__lowerCamelCase, __lowerCamelCase)`
    # (the list compared with itself), and returned that list instead of a
    # number — it raised NameError on any non-empty input. Restored the
    # intended include/exclude recurrence.
    if not __lowerCamelCase:
        return 0
    # max_including: best sum whose last taken element is the current one.
    # max_excluding: best sum that skips the current element.
    max_including = __lowerCamelCase[0]
    max_excluding = 0
    for num in __lowerCamelCase[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 81 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
    """simple docstring"""

    # Test helper that builds tiny MaskFormer configs/inputs and checks the
    # shapes and presence of model outputs.
    # NOTE(review): every method below is named `__snake_case`, so later
    # definitions shadow earlier ones on the class, and many assignments
    # target `__snake_case` while later lines read the original local names
    # (`config`, `model`, `outputs`, ...) — confirm against the upstream
    # MaskFormerModelTester before trusting call sites.
    def __init__( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple=2 , lowerCamelCase : Any=True , lowerCamelCase : Tuple=False , lowerCamelCase : str=10 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Tuple=32 * 4 , lowerCamelCase : List[Any]=32 * 6 , lowerCamelCase : Tuple=4 , lowerCamelCase : Tuple=32 , ) -> int:
        # Store the miniature-model / test hyper-parameters.
        __snake_case : List[str] = parent
        __snake_case : int = batch_size
        __snake_case : List[str] = is_training
        __snake_case : Dict = use_auxiliary_loss
        __snake_case : Dict = num_queries
        __snake_case : List[str] = num_channels
        __snake_case : Tuple = min_size
        __snake_case : Optional[int] = max_size
        __snake_case : int = num_labels
        __snake_case : int = mask_feature_size

    def __snake_case ( self : List[Any] ) -> int:
        # Random pixel values, an all-ones pixel mask, and random binary
        # mask / class labels for one segmentation batch.
        __snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            lowerCamelCase )
        __snake_case : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase )
        __snake_case : List[str] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase ) > 0.5
        ).float()
        __snake_case : Dict = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase ) > 0.5).long()
        __snake_case : int = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def __snake_case ( self : Any ) -> Tuple:
        # Tiny Swin backbone plus a tiny DETR decoder config.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def __snake_case ( self : int ) -> Dict:
        # Config plus the minimal inputs dict used by the common tests.
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
        __snake_case : Tuple = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def __snake_case ( self : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[str] ) -> List[str]:
        # Check hidden-state counts for encoder / pixel decoder / transformer
        # decoder against the configured depths.
        __snake_case : Any = output.encoder_hidden_states
        __snake_case : List[str] = output.pixel_decoder_hidden_states
        __snake_case : Dict = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(lowerCamelCase ) , config.decoder_config.decoder_layers )

    def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict=False ) -> Dict:
        # Build a base MaskFormerModel and verify its output tensors.
        with torch.no_grad():
            __snake_case : int = MaskFormerModel(config=lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            __snake_case : List[Any] = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
            __snake_case : Union[str, Any] = model(lowerCamelCase , output_hidden_states=lowerCamelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(lowerCamelCase , lowerCamelCase )

    def __snake_case ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
        # Build the instance-segmentation head model and verify its outputs
        # both with and without labels (the latter must produce a loss).
        __snake_case : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()

        def comm_check_on_output(lowerCamelCase : Union[str, Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            __snake_case : Dict = model(pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase )
            __snake_case : Optional[Any] = model(lowerCamelCase )
            comm_check_on_output(lowerCamelCase )
            __snake_case : str = model(
                pixel_values=lowerCamelCase , pixel_mask=lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
            comm_check_on_output(lowerCamelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """simple docstring"""

    # Common-model-test suite for MaskFormer (ModelTesterMixin +
    # PipelineTesterMixin, per the file's imports).
    # NOTE(review): the test methods are all named `__snake_case`, so later
    # definitions shadow earlier ones at class-creation time; under unittest
    # only the surviving definition would be collected. Confirm against the
    # upstream MaskFormerModelTest.
    __UpperCAmelCase : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __UpperCAmelCase : Dict = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : Dict = False
    __UpperCAmelCase : int = False
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : str = False

    def __snake_case ( self : Optional[int] ) -> List[str]:
        # setUp: build the model tester and a config tester (no text modality).
        __snake_case : Union[str, Any] = MaskFormerModelTester(self )
        __snake_case : Dict = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )

    def __snake_case ( self : int ) -> Dict:
        self.config_tester.run_common_tests()

    def __snake_case ( self : Any ) -> Union[str, Any]:
        __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )

    def __snake_case ( self : Any ) -> Optional[Any]:
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def __snake_case ( self : List[Any] ) -> Tuple:
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def __snake_case ( self : Optional[int] ) -> List[str]:
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def __snake_case ( self : Union[str, Any] ) -> Tuple:
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def __snake_case ( self : str ) -> List[Any]:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __snake_case ( self : int ) -> Dict:
        pass

    def __snake_case ( self : int ) -> List[Any]:
        # The forward signature should start with `pixel_values`.
        __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[Any] = model_class(lowerCamelCase )
            __snake_case : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Any = [*signature.parameters.keys()]
            __snake_case : Dict = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )

    @slow
    def __snake_case ( self : List[Any] ) -> int:
        # Smoke-test loading a released checkpoint from the Hub.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            __snake_case : int = MaskFormerModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )

    def __snake_case ( self : Union[str, Any] ) -> str:
        # Loss computation with square inputs and dummy labels.
        __snake_case : Union[str, Any] = (self.model_tester.min_size,) * 2
        __snake_case : List[Any] = {
            "pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase ),
            "class_labels": torch.zeros(2 , 10 , device=lowerCamelCase ).long(),
        }
        __snake_case : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase )
        __snake_case : Dict = model(**lowerCamelCase )
        self.assertTrue(outputs.loss is not None )

    def __snake_case ( self : Any ) -> Optional[Any]:
        __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(lowerCamelCase , **lowerCamelCase , output_hidden_states=lowerCamelCase )

    def __snake_case ( self : Any ) -> List[str]:
        # Attention tensors should be returned when requested.
        __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : int = model_class(lowerCamelCase ).to(lowerCamelCase )
            __snake_case : Dict = model(**lowerCamelCase , output_attentions=lowerCamelCase )
            self.assertTrue(outputs.attentions is not None )

    def __snake_case ( self : Optional[int] ) -> str:
        # Training smoke test: the loss must backpropagate.
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        __snake_case : int = self.all_model_classes[1]
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
        __snake_case : Optional[Any] = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        model.train()
        __snake_case : str = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase ).loss
        loss.backward()

    def __snake_case ( self : Any ) -> str:
        # Retained-gradient test: every intermediate hidden state / attention
        # tensor should receive a gradient after backward().
        # only MaskFormerForInstanceSegmentation has the loss
        __snake_case : List[Any] = self.all_model_classes[1]
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        __snake_case : Union[str, Any] = True
        __snake_case : Optional[int] = True
        __snake_case : Any = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        model.train()
        __snake_case : str = model(lowerCamelCase , mask_labels=lowerCamelCase , class_labels=lowerCamelCase )
        __snake_case : Optional[Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __snake_case : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        __snake_case : Any = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __snake_case : Tuple = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=lowerCamelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the integration tests' torch.allclose checks.
_snake_case : int = 1E-4
def lowerCAmelCase_ ( ):
    """Load the standard COCO cats fixture image used by the slow tests.

    Returns:
        The PIL image opened from the fixture path (loaded lazily by PIL).
    """
    # Bug fix: the previous version bound the opened image to `__snake_case`
    # and then returned the undefined name `image` (NameError at every call);
    # return the opened image directly.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_vision
@slow
class a (unittest.TestCase ):
    """simple docstring"""

    # Slow integration tests against released MaskFormer checkpoints,
    # comparing output slices to hard-coded reference values.
    # NOTE(review): the test methods are all named `__snake_case`, so later
    # definitions shadow earlier ones on the class, and assignments target
    # `__snake_case` while later lines read `model`, `inputs`, `outputs`,
    # etc. — confirm against the upstream MaskFormerModelIntegrationTest.
    @cached_property
    def __snake_case ( self : Optional[Any] ) -> List[str]:
        # Image processor for the small Swin COCO checkpoint (None w/o vision).
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def __snake_case ( self : Dict ) -> Optional[int]:
        # Base model: check encoder / pixel-decoder / transformer-decoder
        # last-hidden-state slices against reference values.
        __snake_case : Tuple = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(lowerCamelCase )
        __snake_case : Dict = self.default_image_processor
        __snake_case : Optional[int] = prepare_img()
        __snake_case : Optional[int] = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        __snake_case : Optional[Any] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            __snake_case : Optional[int] = model(**lowerCamelCase )
        __snake_case : str = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
        __snake_case : Optional[int] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
        __snake_case : Tuple = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )

    def __snake_case ( self : Dict ) -> List[Any]:
        # Instance-segmentation head (Swin-small COCO): check mask- and
        # class-query logits shapes and value slices.
        __snake_case : Any = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(lowerCamelCase )
            .eval()
        )
        __snake_case : Any = self.default_image_processor
        __snake_case : str = prepare_img()
        __snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        __snake_case : int = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            __snake_case : str = model(**lowerCamelCase )
        # masks_queries_logits
        __snake_case : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __snake_case : Union[str, Any] = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        __snake_case : Optional[Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
        # class_queries_logits
        __snake_case : Tuple = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __snake_case : Optional[int] = torch.tensor(
            [
                [1.6512E00, -5.2572E00, -3.3519E00],
                [3.6169E-02, -5.9025E00, -2.9313E00],
                [1.0766E-04, -7.7630E00, -5.1263E00],
            ] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )

    def __snake_case ( self : Any ) -> int:
        # Instance-segmentation head (ResNet-101 COCO-stuff): same checks
        # against that checkpoint's reference values.
        __snake_case : int = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(lowerCamelCase )
            .eval()
        )
        __snake_case : Tuple = self.default_image_processor
        __snake_case : List[str] = prepare_img()
        __snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        __snake_case : List[str] = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCamelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            __snake_case : Optional[Any] = model(**lowerCamelCase )
        # masks_queries_logits
        __snake_case : str = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        __snake_case : Optional[int] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        __snake_case : List[Any] = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )
        # class_queries_logits
        __snake_case : Dict = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        __snake_case : Any = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=lowerCamelCase ) )

    def __snake_case ( self : Optional[Any] ) -> List[str]:
        # Batched inference with segmentation maps: loss must be computed.
        __snake_case : List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(lowerCamelCase )
            .eval()
        )
        __snake_case : Optional[int] = self.default_image_processor
        __snake_case : List[str] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        __snake_case : str = inputs["pixel_values"].to(lowerCamelCase )
        __snake_case : Tuple = [el.to(lowerCamelCase ) for el in inputs["mask_labels"]]
        __snake_case : Tuple = [el.to(lowerCamelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            __snake_case : Dict = model(**lowerCamelCase )
        self.assertTrue(outputs.loss is not None )
| 81 |
from __future__ import annotations
from typing import Any
def lowerCAmelCase_ ( __lowerCamelCase ):
    # Print every subsequence of ``__lowerCamelCase`` by walking the state
    # space tree starting from an empty subsequence at index 0.
    # NOTE(review): `create_state_space_tree` is not defined under that name
    # in the visible module (the helper below was also renamed to
    # `lowerCAmelCase_`, which shadows this definition), so this call raises
    # NameError as written — confirm the intended target names.
    create_state_space_tree(__lowerCamelCase , [] , 0 )
def lowerCAmelCase_ ( __lowerCamelCase , current_subsequence , index ):
    """Recursively print every subsequence of ``__lowerCamelCase``.

    Classic state-space-tree backtracking: at each ``index`` the element is
    either skipped or included, and at the end of the sequence the current
    subsequence is printed. Subsequences are printed in skip-first order.

    Args:
        __lowerCamelCase: the full input sequence (indexable).
        current_subsequence: mutable list accumulating the chosen elements;
            restored to its input state before returning (backtracking).
        index: position currently being decided.
    """
    # Bug fix: the previous version declared all three parameters with the
    # same name (a SyntaxError) and read the undefined names `index`,
    # `sequence`, `current_subsequence` and `create_state_space_tree`.
    # Restored distinct parameters and made the recursion self-referential.
    if index == len(__lowerCamelCase ):
        print(current_subsequence )
        return
    # Branch 1: skip the element at `index`.
    lowerCAmelCase_(__lowerCamelCase , current_subsequence , index + 1 )
    # Branch 2: include it, recurse, then backtrack.
    current_subsequence.append(__lowerCamelCase[index] )
    lowerCAmelCase_(__lowerCamelCase , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    # Demo: print all subsequences of a numeric and then a string sequence.
    # NOTE(review): `generate_all_subsequences` and `seq` are not defined in
    # this module as written (the list below is bound to `_snake_case`), so
    # running this guard raises NameError — confirm the intended names
    # against the original script.
    _snake_case : list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 81 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the configuration.
_snake_case : Any = logging.get_logger(__name__)
# Map from canonical Falcon checkpoint names to their hosted config files.
_snake_case : Union[str, Any] = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class a (_lowerCAmelCase ):
    """simple docstring"""

    # Falcon model configuration: model type plus the keys to ignore when
    # computing inference-time memory.
    # NOTE(review): both @property methods below share the name `__snake_case`
    # (upstream: `head_dim` and `rotary`), so the second shadows the first on
    # the class — confirm against the upstream FalconConfig. Likewise the
    # __init__ assignments target `__snake_case` while `super().__init__` and
    # the properties read attributes such as `hidden_size` and `alibi`.
    __UpperCAmelCase : Optional[int] = "falcon"
    __UpperCAmelCase : str = ["past_key_values"]

    def __init__( self : Optional[Any] , lowerCamelCase : str=65024 , lowerCamelCase : str=4544 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=71 , lowerCamelCase : Optional[Any]=1E-5 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Any=True , lowerCamelCase : Tuple=0.0 , lowerCamelCase : str=0.0 , lowerCamelCase : str=None , lowerCamelCase : List[str]=False , lowerCamelCase : int=False , lowerCamelCase : str=True , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=False , lowerCamelCase : Optional[int]=11 , lowerCamelCase : Optional[int]=11 , **lowerCamelCase : Dict , ) -> Optional[Any]:
        # Store the architecture hyper-parameters, then delegate the token-id
        # bookkeeping to the base PretrainedConfig.
        __snake_case : List[Any] = vocab_size
        # Backward compatibility with n_embed kwarg
        __snake_case : Optional[Any] = kwargs.pop("n_embed" , lowerCamelCase )
        __snake_case : str = hidden_size if n_embed is None else n_embed
        __snake_case : List[str] = num_hidden_layers
        __snake_case : Optional[int] = num_attention_heads
        __snake_case : Tuple = layer_norm_epsilon
        __snake_case : List[str] = initializer_range
        __snake_case : Tuple = use_cache
        __snake_case : List[Any] = hidden_dropout
        __snake_case : Tuple = attention_dropout
        __snake_case : int = bos_token_id
        __snake_case : Any = eos_token_id
        # Default to multi-head attention (kv heads == attention heads).
        __snake_case : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads
        __snake_case : Optional[Any] = alibi
        __snake_case : Any = new_decoder_architecture
        __snake_case : List[str] = multi_query  # Ignored when new_decoder_architecture is True
        __snake_case : int = parallel_attn
        __snake_case : Union[str, Any] = bias
        super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )

    @property
    def __snake_case ( self : Dict ) -> List[Any]:
        # Per-attention-head dimensionality.
        return self.hidden_size // self.num_attention_heads

    @property
    def __snake_case ( self : Any ) -> List[Any]:
        # Rotary embeddings are used exactly when ALiBi is disabled.
        return not self.alibi
| 81 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image to a normalized ``torch`` tensor in ``[-1, 1]``.

    Bug fix: the original bound both halves of ``image.size`` to a single
    mangled local while the following lines read ``w`` and ``h`` (NameError),
    referenced the nonexistent dtype ``np.floataa``, and was named
    ``lowerCAmelCase_`` although the pipeline below calls ``preprocess``.
    Restored from the upstream diffusers helper.

    :param image: a ``PIL.Image.Image``
    :returns: float32 tensor of shape (1, 3, h, w) scaled to [-1, 1]
    """
    w, h = image.size
    # Resize to an integer multiple of 32, as required by the UNet.
    w, h = (x - x % 3_2 for x in (w, h))
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    # Scale uint8 [0, 255] pixels to float [0, 1].
    image = np.array(image ).astype(np.float32 ) / 2_5_5.0
    # HWC -> NCHW with a leading batch axis of 1.
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # Map [0, 1] to [-1, 1].
    return 2.0 * image - 1.0
class a (_lowerCAmelCase ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    Bug fix: the original ``__init__`` and ``__call__`` declared every
    parameter with the single mangled name ``lowerCamelCase`` (a duplicate-
    argument SyntaxError) and bound each intermediate to a throwaway
    ``__snake_case`` local while later lines read ``batch_size``, ``image``,
    ``eta`` etc. Parameter and local names are restored from the upstream
    diffusers ``LDMSuperResolutionPipeline``; the class name and base are kept
    to preserve the externally visible interface.
    """

    def __init__( self : Tuple , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> None:
        """Register the VQ-VAE, UNet and noise scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : List[str] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        """Run super-resolution denoising on ``image``.

        :param image: low-resolution input, a PIL image or NCHW tensor
        :param batch_size: overridden from the tensor's batch dim when given a tensor
        :param num_inference_steps: number of scheduler denoising steps
        :param eta: DDIM eta in [0, 1]; ignored by schedulers without an ``eta`` arg
        :param generator: RNG(s) for the initial latents
        :param output_type: "pil" to get PIL images, anything else for ndarray
        :param return_dict: when False, return a plain one-tuple instead
        :raises ValueError: if ``image`` is neither a PIL image nor a tensor
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )

        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 81 | 1 |
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)

# Bug fix (whole script): all four functions were defined under the single
# mangled name `lowerCAmelCase_`, so each def shadowed the previous one while
# the call sites referenced `parse_args`, `tokenize_function`,
# `get_serialized_examples` and `main` (NameError). Mangled locals and the
# TF feature names (`intaa_list`/`IntaaList` -> int64_list/Int64List) are
# restored from the upstream transformers TF-TPU example script.


def parse_args():
    """Parse command-line options for the TFRecord sharding script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1_0_0_0 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the "text" column of a batch."""

    def fn(examples):
        return tokenizer(examples["text"] )

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into ``tf.train.Example`` byte strings.

    :param tokenized_data: dict with "input_ids" and "attention_mask" lists
    :returns: list of serialized protobuf byte strings, one per sample
    """
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records


def main(args):
    """Tokenize, chunk and shard the dataset into TFRecord files."""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'Limiting the dataset to {args.limit} entries.' )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_0_0_0 , num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing

    with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=f )


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 81 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Force deterministic torch kernels so the hard-coded expected slices in the
# tests below are reproducible across runs.
enable_full_determinism()
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Model tests for ``AutoencoderKL`` (the KL-regularized VAE).

    NOTE(review): identifiers here look machine-mangled — every method is named
    ``__snake_case`` (so later defs shadow earlier ones at class-creation time)
    and every local is rebound to ``__snake_case`` while later lines read names
    such as ``batch_size``, ``model``, ``out`` and ``labels``. The code is kept
    byte-identical; only comments were added. Confirm against the upstream
    diffusers VAE model tests before relying on any behavior described below.
    """
    # Model class under test, the output attribute name, and a tolerance.
    __UpperCAmelCase : str = AutoencoderKL
    __UpperCAmelCase : Optional[Any] = "sample"
    __UpperCAmelCase : Optional[int] = 1e-2
    @property
    def __snake_case ( self : Dict ) -> Optional[Any]:
        # Dummy input: presumably a random 4x3x32x32 batch — the first three
        # assignments look like mangled `batch_size`/`num_channels`/`sizes`.
        __snake_case : Optional[Any] = 4
        __snake_case : Tuple = 3
        __snake_case : List[str] = (32, 32)
        __snake_case : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
        return {"sample": image}
    @property
    def __snake_case ( self : Union[str, Any] ) -> Tuple:
        # Expected input shape (C, H, W).
        return (3, 32, 32)
    @property
    def __snake_case ( self : int ) -> int:
        # Expected output shape (C, H, W).
        return (3, 32, 32)
    def __snake_case ( self : Optional[Any] ) -> Dict:
        # Init kwargs for a tiny two-block VAE plus the dummy input dict.
        __snake_case : Optional[Any] = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        __snake_case : Any = self.dummy_input
        return init_dict, inputs_dict
    def __snake_case ( self : str ) -> Dict:
        pass
    def __snake_case ( self : Tuple ) -> List[str]:
        pass
    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def __snake_case ( self : Any ) -> Optional[Any]:
        # Trains one step with and without gradient checkpointing and asserts
        # that losses and parameter gradients match.
        # enable deterministic behavior for gradient checkpointing
        __snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
        __snake_case : str = self.model_class(**lowerCamelCase )
        model.to(lowerCamelCase )
        assert not model.is_gradient_checkpointing and model.training
        __snake_case : str = model(**lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        __snake_case : Any = torch.randn_like(lowerCamelCase )
        __snake_case : str = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        __snake_case : Optional[int] = self.model_class(**lowerCamelCase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(lowerCamelCase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        __snake_case : int = model_a(**lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        __snake_case : Union[str, Any] = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        __snake_case : Optional[int] = dict(model.named_parameters() )
        __snake_case : List[Any] = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def __snake_case ( self : List[Any] ) -> Optional[int]:
        # Loads a dummy pretrained checkpoint and checks no keys are missing.
        __snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase )
        self.assertIsNotNone(lowerCamelCase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowerCamelCase )
        __snake_case : Optional[Any] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        # Runs a seeded forward pass and compares an output slice against
        # device-specific hard-coded expectations.
        __snake_case : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        __snake_case : Dict = model.to(lowerCamelCase )
        model.eval()
        if torch_device == "mps":
            __snake_case : int = torch.manual_seed(0 )
        else:
            __snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
        __snake_case : List[str] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        __snake_case : Union[str, Any] = image.to(lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model(lowerCamelCase , sample_posterior=lowerCamelCase , generator=lowerCamelCase ).sample
        __snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            __snake_case : Union[str, Any] = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            __snake_case : Tuple = torch.tensor(
                [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
        else:
            __snake_case : List[str] = torch.tensor(
                [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
        self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
@slow
class a (unittest.TestCase ):
    """Slow integration tests for the Stable Diffusion ``AutoencoderKL`` VAE.

    NOTE(review): identifiers look machine-mangled — every method is named
    ``__snake_case`` (later defs shadow earlier ones) and several signatures
    declare duplicate ``lowerCamelCase`` parameters, which is a SyntaxError.
    Locals are rebound to ``__snake_case`` while later lines read ``model``,
    ``image``, ``sample`` etc. Code kept byte-identical; only comments added.
    Confirm against the upstream diffusers slow VAE tests.
    """
    def __snake_case ( self : int , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] ) -> List[str]:
        # Builds the filename of a cached gaussian-noise fixture for a given
        # seed and shape (upstream name: ``get_file_format``).
        return F'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase ) for s in shape] )}.npy'
    def __snake_case ( self : List[Any] ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any]=0 , lowerCamelCase : Tuple=(4, 3, 512, 512) , lowerCamelCase : Optional[int]=False ) -> str:
        # Loads a seeded noise image fixture from the Hub and moves it to the
        # test device/dtype. NOTE(review): ``torch.floataa`` is not a real
        # dtype; presumably float16-vs-float32 selection before mangling.
        __snake_case : List[Any] = torch.floataa if fpaa else torch.floataa
        __snake_case : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase , lowerCamelCase ) ) ).to(lowerCamelCase ).to(lowerCamelCase )
        return image
    def __snake_case ( self : Optional[Any] , lowerCamelCase : int="CompVis/stable-diffusion-v1-4" , lowerCamelCase : int=False ) -> int:
        # Loads the pretrained SD VAE (optionally in fp16) in eval mode.
        __snake_case : str = "fp16" if fpaa else None
        __snake_case : int = torch.floataa if fpaa else torch.floataa
        __snake_case : int = AutoencoderKL.from_pretrained(
            lowerCamelCase , subfolder="vae" , torch_dtype=lowerCamelCase , revision=lowerCamelCase , )
        model.to(lowerCamelCase ).eval()
        return model
    def __snake_case ( self : str , lowerCamelCase : int=0 ) -> Optional[Any]:
        # Seeded RNG on the appropriate device (MPS needs the global seed).
        if torch_device == "mps":
            return torch.manual_seed(lowerCamelCase )
        return torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[Any]:
        # Full forward pass with stochastic sampling; compares an output slice
        # to per-device expected values.
        __snake_case : Optional[Any] = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
        __snake_case : Tuple = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : Optional[Any] = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : int = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
            [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] ) -> Tuple:
        # Same forward-pass check in fp16 (GPU only).
        __snake_case : Any = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase , fpaa=lowerCamelCase )
        __snake_case : List[Any] = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : Any = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
            [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
            # fmt: on
        ] )
    def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ) -> int:
        # Deterministic (mode) forward pass, no posterior sampling.
        __snake_case : int = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
        with torch.no_grad():
            __snake_case : int = model(lowerCamelCase ).sample
        assert sample.shape == image.shape
        __snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        __snake_case : List[str] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
            [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any ) -> Optional[Any]:
        # Decoder-only check: 4-channel 64x64 latents -> 3x512x512 image.
        __snake_case : List[str] = self.get_sd_vae_model()
        __snake_case : List[Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : str = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        __snake_case : str = sample[-1, -2:, :2, -2:].flatten().cpu()
        __snake_case : Optional[int] = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-3 )
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
            [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> int:
        # Decoder-only check in fp16 (GPU only).
        __snake_case : int = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : List[str] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
        with torch.no_grad():
            __snake_case : Union[str, Any] = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        __snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        __snake_case : Optional[Any] = torch.tensor(lowerCamelCase )
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=5E-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> Tuple:
        # Decoding with vs. without xformers attention must agree (fp16).
        __snake_case : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase )
        __snake_case : Any = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model.decode(lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : Any = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
        # Decoding with vs. without xformers attention must agree (fp32).
        __snake_case : str = self.get_sd_vae_model()
        __snake_case : Union[str, Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            __snake_case : List[Any] = model.decode(lowerCamelCase ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            __snake_case : Dict = model.decode(lowerCamelCase ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
            [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
            # fmt: on
        ] )
    def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Optional[int]:
        # Encoder check: latent distribution sampling yields the expected
        # 8x-downsampled latent shape and slice values.
        __snake_case : str = self.get_sd_vae_model()
        __snake_case : int = self.get_sd_image(lowerCamelCase )
        __snake_case : int = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : Optional[Any] = model.encode(lowerCamelCase ).latent_dist
            __snake_case : Dict = dist.sample(generator=lowerCamelCase )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        __snake_case : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
        __snake_case : Dict = torch.tensor(lowerCamelCase )
        __snake_case : Dict = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=lowerCamelCase )
| 81 | 1 |
from ...configuration_utils import PretrainedConfig
# Map of fine-tuned TAPAS checkpoints to their hosted config files.
# NOTE(review): the name `_snake_case` looks machine-mangled (upstream this is
# the pretrained-config archive map); nothing in this file chunk references it.
_snake_case : List[str] = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class a (_lowerCAmelCase ):
    """Configuration for TAPAS table question-answering models.

    Bug fix: the original ``__init__`` declared all 37 parameters with the
    single mangled name ``lowerCamelCase`` (a duplicate-argument SyntaxError)
    and bound each value to a throwaway local instead of a ``self.``
    attribute. Parameter and attribute names are restored from the upstream
    ``transformers.TapasConfig``, whose defaults match the values here
    exactly. Class name and class-attribute name are kept unchanged to
    preserve the externally visible interface.
    """

    __UpperCAmelCase : int = "tapas"

    def __init__(
        self : str ,
        vocab_size : int = 30522 ,
        hidden_size : int = 768 ,
        num_hidden_layers : int = 12 ,
        num_attention_heads : int = 12 ,
        intermediate_size : int = 3072 ,
        hidden_act : str = "gelu" ,
        hidden_dropout_prob : float = 0.1 ,
        attention_probs_dropout_prob : float = 0.1 ,
        max_position_embeddings : int = 1024 ,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] ,  # upstream default; shared list is never mutated here
        initializer_range : float = 0.02 ,
        layer_norm_eps : float = 1E-12 ,
        pad_token_id : int = 0 ,
        positive_label_weight : float = 10.0 ,
        num_aggregation_labels : int = 0 ,
        aggregation_loss_weight : float = 1.0 ,
        use_answer_as_supervision=None ,
        answer_loss_importance : float = 1.0 ,
        use_normalized_answer_loss : bool = False ,
        huber_loss_delta=None ,
        temperature : float = 1.0 ,
        aggregation_temperature : float = 1.0 ,
        use_gumbel_for_cells : bool = False ,
        use_gumbel_for_aggregation : bool = False ,
        average_approximation_function : str = "ratio" ,
        cell_selection_preference=None ,
        answer_loss_cutoff=None ,
        max_num_rows : int = 64 ,
        max_num_columns : int = 32 ,
        average_logits_per_cell : bool = False ,
        select_one_column : bool = True ,
        allow_empty_column_selection : bool = False ,
        init_cell_selection_weights_to_zero : bool = False ,
        reset_position_index_per_cell : bool = True ,
        disable_per_token_loss : bool = False ,
        aggregation_labels=None ,
        no_aggregation_label_index=None ,
        **kwargs ,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 81 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
# Module logger (name looks machine-mangled; not referenced in this file chunk).
_snake_case : Optional[int] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility with FlavaImageProcessor.

    Instantiating it emits a deprecation warning and otherwise behaves exactly
    like the base image processor.
    """

    def __init__( self : Optional[Any] , *args : Any , **kwargs : Any ) -> None:
        # Bug fix: the warning category was the undefined name `lowerCamelCase`,
        # which raised NameError on every instantiation; upstream transformers
        # deprecation shims pass FutureWarning here.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
| 81 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__snake_case : List[Any] = cst_fwd.get(__lowerCamelCase , np.inf )
__snake_case : Union[str, Any] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__snake_case : Tuple = new_cost_f
__snake_case : Optional[Any] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__snake_case : str = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _pass_and_relaxation(
    graph ,
    v ,
    visited_forward ,
    visited_backward ,
    cst_fwd ,
    cst_bwd ,
    queue ,
    parent ,
    shortest_distance ,
):
    """Private edge-relaxation step for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def lowerCAmelCase_ ( source , destination , graph_forward , graph_backward ):
    """Bidirectional Dijkstra: shortest distance from ``source`` to ``destination``.

    ``graph_forward`` holds outgoing edges, ``graph_backward`` incoming edges
    (node -> iterable of (neighbor, weight)). Returns the shortest distance, 0 when
    source == destination, and -1 when no path exists.

    The mangled original declared four identically named parameters (a SyntaxError)
    and called an undefined ``pass_and_relaxation``; a private helper is bundled
    here so the function is self-contained.
    """
    shortest_path_distance = -1  # sentinel for "no path"
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        # Pop the closest unsettled node on each frontier.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = _pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = _pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        # Standard termination: frontiers have crossed the best meeting distance.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Demo graphs for the bidirectional search: the first is a forward adjacency list
# (outgoing edges), the second the matching backward list (incoming edges).
# NOTE(review): both literals are bound to the SAME throwaway name ``_snake_case``,
# so the backward graph overwrites the forward one; originally these were two
# distinct names (e.g. ``graph_fwd`` / ``graph_bwd``) — restore before use.
_snake_case : Dict = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
_snake_case : int = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # no doctests are defined in this module, so this is a no-op
| 81 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """SamProcessor tests (PyTorch backend): save/load round-trips and mask post-processing.

    NOTE(review): assignment targets in this class look mangled — values are bound
    to ``__snake_case`` while later lines read ``processor``, ``masks``,
    ``input_feat_extract`` etc. that are never assigned, and ``np.uinta`` appears
    where ``np.uint8`` is expected. Restore the original names before relying on
    these tests.
    """

    def __snake_case ( self : Tuple ) -> Optional[Any]:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : Dict = tempfile.mkdtemp()
        __snake_case : Any = SamImageProcessor()
        __snake_case : Optional[int] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
        # Reload the image processor saved in setUp, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[Any] ) -> Dict:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : int ) -> List[Any]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : List[Any] ) -> Dict:
        # save_pretrained/from_pretrained round-trip preserves the image-processor
        # config, including kwargs overrides supplied at load time.
        __snake_case : int = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : List[str] ) -> Tuple:
        # The processor must produce the same tensors as the bare image processor.
        __snake_case : int = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Optional[int] = self.prepare_image_inputs()
        __snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : Dict = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_torch
    def __snake_case ( self : Optional[Any] ) -> Dict:
        # post_process_masks upscales low-res masks back to the original image size,
        # accepting torch tensors, Python lists and numpy arrays alike; ragged
        # (non-rectangular) input must raise.
        __snake_case : Tuple = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[str] = [torch.ones((1, 3, 5, 5) )]
        __snake_case : Tuple = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : int = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : List[str] = [np.ones((1, 3, 5, 5) )]
        __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : str = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase ):
            __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
@require_vision
@require_tf
class a (unittest.TestCase ):
    """SamProcessor tests (TensorFlow backend): save/load round-trips and mask post-processing.

    NOTE(review): assignment targets look mangled — values are bound to
    ``__snake_case`` while later lines read ``processor``, ``masks``,
    ``input_feat_extract`` etc.; ``np.uinta`` appears where ``np.uint8`` is
    expected. Restore the original names before relying on these tests.
    """

    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : int = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : str , **lowerCamelCase : Any ) -> Tuple:
        # Reload the saved image processor, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> Any:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : str ) -> List[Any]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : int ) -> List[str]:
        # save_pretrained/from_pretrained round-trip preserves the image-processor
        # config, including kwargs overrides supplied at load time.
        __snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : Union[str, Any] ) -> List[Any]:
        # The processor must produce the same tensors as the bare image processor.
        __snake_case : str = self.get_image_processor()
        __snake_case : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : int = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_tf
    def __snake_case ( self : Any ) -> Optional[int]:
        # post_process_masks (TF path) upscales masks to the original size; accepts
        # tf tensors, Python lists and numpy arrays; ragged input raises
        # tf.errors.InvalidArgumentError.
        __snake_case : List[str] = self.get_image_processor()
        __snake_case : Dict = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        __snake_case : List[Any] = [[1764, 2646]]
        __snake_case : Dict = [[683, 1024]]
        __snake_case : List[str] = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , tf.convert_to_tensor(lowerCamelCase ) , tf.convert_to_tensor(lowerCamelCase ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
        __snake_case : List[str] = processor.post_process_masks(
            lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __snake_case : Dict = processor.post_process_masks(
                lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """Cross-framework (PyTorch vs TensorFlow) equivalence tests for SamProcessor.

    NOTE(review): assignment targets look mangled — values are bound to
    ``__snake_case`` while later lines read ``tf_masks``/``pt_masks`` etc., and
    ``np.uinta``/``np.floataa`` appear where ``np.uint8``/``np.float32`` are
    expected. Restore the original names before relying on these tests.
    """

    def __snake_case ( self : List[str] ) -> str:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : Optional[int] = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : List[str] , **lowerCamelCase : Any ) -> Dict:
        # Reload the saved image processor, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> List[Any]:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        # post_process_masks must be numerically identical between TF and PT paths.
        __snake_case : str = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __snake_case : Dict = [tf.convert_to_tensor(lowerCamelCase )]
        __snake_case : List[Any] = [torch.tensor(lowerCamelCase )]
        __snake_case : Optional[Any] = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : Union[str, Any] = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        __snake_case : Dict = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def __snake_case ( self : List[Any] ) -> List[str]:
        # Preprocessing must produce identical pixel_values across PT/TF and via the
        # processor vs the bare image processor.
        __snake_case : Any = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : Any = image_processor(lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Optional[Any] = processor(images=lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        __snake_case : List[Any] = processor(images=lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
| 81 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """SamProcessor tests (PyTorch backend): save/load round-trips and mask post-processing.

    NOTE(review): assignment targets in this class look mangled — values are bound
    to ``__snake_case`` while later lines read ``processor``, ``masks``,
    ``input_feat_extract`` etc. that are never assigned, and ``np.uinta`` appears
    where ``np.uint8`` is expected. Restore the original names before relying on
    these tests.
    """

    def __snake_case ( self : Tuple ) -> Optional[Any]:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : Dict = tempfile.mkdtemp()
        __snake_case : Any = SamImageProcessor()
        __snake_case : Optional[int] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
        # Reload the image processor saved in setUp, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[Any] ) -> Dict:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : int ) -> List[Any]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : List[Any] ) -> Dict:
        # save_pretrained/from_pretrained round-trip preserves the image-processor
        # config, including kwargs overrides supplied at load time.
        __snake_case : int = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : List[str] ) -> Tuple:
        # The processor must produce the same tensors as the bare image processor.
        __snake_case : int = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Optional[int] = self.prepare_image_inputs()
        __snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : Dict = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_torch
    def __snake_case ( self : Optional[Any] ) -> Dict:
        # post_process_masks upscales low-res masks back to the original image size,
        # accepting torch tensors, Python lists and numpy arrays alike; ragged
        # (non-rectangular) input must raise.
        __snake_case : Tuple = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[str] = [torch.ones((1, 3, 5, 5) )]
        __snake_case : Tuple = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : int = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : List[str] = [np.ones((1, 3, 5, 5) )]
        __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : str = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase ):
            __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
@require_vision
@require_tf
class a (unittest.TestCase ):
    """SamProcessor tests (TensorFlow backend): save/load round-trips and mask post-processing.

    NOTE(review): assignment targets look mangled — values are bound to
    ``__snake_case`` while later lines read ``processor``, ``masks``,
    ``input_feat_extract`` etc.; ``np.uinta`` appears where ``np.uint8`` is
    expected. Restore the original names before relying on these tests.
    """

    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : int = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : str , **lowerCamelCase : Any ) -> Tuple:
        # Reload the saved image processor, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> Any:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : str ) -> List[Any]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def __snake_case ( self : int ) -> List[str]:
        # save_pretrained/from_pretrained round-trip preserves the image-processor
        # config, including kwargs overrides supplied at load time.
        __snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    def __snake_case ( self : Union[str, Any] ) -> List[Any]:
        # The processor must produce the same tensors as the bare image processor.
        __snake_case : str = self.get_image_processor()
        __snake_case : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : int = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_tf
    def __snake_case ( self : Any ) -> Optional[int]:
        # post_process_masks (TF path) upscales masks to the original size; accepts
        # tf tensors, Python lists and numpy arrays; ragged input raises
        # tf.errors.InvalidArgumentError.
        __snake_case : List[str] = self.get_image_processor()
        __snake_case : Dict = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        __snake_case : List[Any] = [[1764, 2646]]
        __snake_case : Dict = [[683, 1024]]
        __snake_case : List[str] = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , tf.convert_to_tensor(lowerCamelCase ) , tf.convert_to_tensor(lowerCamelCase ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
        __snake_case : List[str] = processor.post_process_masks(
            lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __snake_case : Dict = processor.post_process_masks(
                lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """Cross-framework (PyTorch vs TensorFlow) equivalence tests for SamProcessor.

    NOTE(review): assignment targets look mangled — values are bound to
    ``__snake_case`` while later lines read ``tf_masks``/``pt_masks`` etc., and
    ``np.uinta``/``np.floataa`` appear where ``np.uint8``/``np.float32`` are
    expected. Restore the original names before relying on these tests.
    """

    def __snake_case ( self : List[str] ) -> str:
        # setUp: save a default SamProcessor into a fresh temporary directory.
        __snake_case : Optional[int] = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    def __snake_case ( self : List[str] , **lowerCamelCase : Any ) -> Dict:
        # Reload the saved image processor, forwarding any override kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    def __snake_case ( self : Optional[int] ) -> List[Any]:
        # tearDown: remove the temporary directory created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        # Build a single random RGB PIL image (30x400) as test input.
        __snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        # post_process_masks must be numerically identical between TF and PT paths.
        __snake_case : str = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __snake_case : Dict = [tf.convert_to_tensor(lowerCamelCase )]
        __snake_case : List[Any] = [torch.tensor(lowerCamelCase )]
        __snake_case : Optional[Any] = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : Union[str, Any] = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        __snake_case : Dict = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def __snake_case ( self : List[Any] ) -> List[str]:
        # Preprocessing must produce identical pixel_values across PT/TF and via the
        # processor vs the bare image processor.
        __snake_case : Any = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : Any = image_processor(lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Optional[Any] = processor(images=lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        __snake_case : List[Any] = processor(images=lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
| 81 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
# Module-level logger for this deprecation shim.
_snake_case : List[str] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias for the DeiT image processor, kept for backward compatibility.

    Instantiating this class only emits a FutureWarning and then defers to the
    parent image-processor implementation.
    """

    def __init__( self : List[str] , *args : str , **kwargs : Union[str, Any] ) -> None:
        # Fixes two defects in the mangled original: *args and **kwargs shared one
        # parameter name (a SyntaxError), and the kwargs mapping was passed as the
        # warn *category*; the category must be a Warning subclass.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT package: submodules load only when their
# attributes are first accessed (see transformers' _LazyModule).
# Fixes the mangled original, which never defined ``_import_structure`` (NameError
# at import), overwrote one throwaway name for the optional entries, and never
# installed the lazy proxy into ``sys.modules``.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent entries are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent modeling entries.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the OwlViT package: submodules load only when their
# attributes are first accessed (see transformers' _LazyModule).
# Fixes the mangled original, which never defined ``_import_structure`` (NameError
# at import), overwrote one throwaway name for the optional entries, and never
# installed the lazy proxy into ``sys.modules``.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent entries are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent modeling entries.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
def lowerCAmelCase_ ( __lowerCamelCase = 1_0_0_0 ):
    """Return the index of the first Fibonacci number with at least
    ``__lowerCamelCase`` decimal digits (Project Euler problem 25).

    The mangled original referenced undefined names (``fa``, ``f``, ``i``, ``n``)
    and counted the digits of the *argument* rather than of the Fibonacci term, so
    it could not run; this is the standard reconstruction. Indexing follows the
    usual convention F(1) = F(2) = 1.
    """
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    index = 2  # sequence index of fib_curr
    while len(str(fib_curr)) < __lowerCamelCase:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index
if __name__ == "__main__":
    # The original called an undefined ``solution``; the (mangled) function defined
    # above is ``lowerCAmelCase_``. Reads a digit count from stdin and prints the
    # index of the first Fibonacci number with that many digits.
    print(lowerCAmelCase_(int(str(input()).strip())))
| 81 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
_snake_case : int = logging.get_logger(__name__)

# Map from pretrained checkpoint name to the URL of its hosted config file.
_snake_case : str = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class a (_lowerCAmelCase ):
    """Configuration class for a VAN (Visual Attention Network) model.

    Fixes two defects in the mangled original: every ``__init__`` parameter shared
    the same name (a SyntaxError), and each value was bound to a throwaway local
    instead of ``self``, so the config stored nothing. Parameter names are restored
    from the names the body read.
    """

    # Model-type identifier string. NOTE(review): the attribute itself was renamed
    # by the obfuscation (originally presumably ``model_type``); kept as-is to
    # preserve the visible class interface.
    __UpperCAmelCase : List[Any] = "van"

    def __init__(
        self ,
        image_size = 224 ,
        num_channels = 3 ,
        patch_sizes = [7, 3, 3, 3] ,
        strides = [4, 2, 2, 2] ,
        hidden_sizes = [64, 128, 320, 512] ,
        depths = [3, 3, 12, 3] ,
        mlp_ratios = [8, 8, 4, 4] ,
        hidden_act = "gelu" ,
        initializer_range = 0.02 ,
        layer_norm_eps = 1E-6 ,
        layer_scale_init_value = 1E-2 ,
        drop_path_rate = 0.0 ,
        dropout_rate = 0.0 ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        # Persist every hyper-parameter on the instance so serialization works.
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 81 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture used by the tokenizer tests below.
_snake_case : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer
__UpperCAmelCase : str = XLMRobertaTokenizerFast
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Tuple = True
    def __snake_case ( self : Tuple ) -> Union[str, Any]:
        # setUp: build a slow tokenizer from the SentencePiece fixture and save it
        # so the other tests can reload it from ``self.tmpdirname``.
        # NOTE(review): the result is bound to the mangled ``__snake_case`` while
        # the next line reads ``tokenizer``, and the fixture path argument is the
        # undefined ``lowerCamelCase`` — restore the original names.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __snake_case : Optional[int] = XLMRobertaTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def __snake_case ( self : Tuple ) -> int:
        # "<pad>" must map to id 1 and back.
        # NOTE(review): the token/id values are bound to mangled names while the
        # assertions read positional ``lowerCamelCase`` — restore before use.
        __snake_case : Optional[int] = "<pad>"
        __snake_case : Any = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
    def __snake_case ( self : Optional[Any] ) -> int:
        # Vocabulary layout check: special tokens at the expected positions and
        # 1002 entries total in the fixture vocabulary.
        # NOTE(review): the key list is bound to ``__snake_case`` while later lines
        # read ``vocab_keys``/``lowerCamelCase`` — restore before use.
        __snake_case : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(lowerCamelCase ) , 1002 )
    def __snake_case ( self : Optional[Any] ) -> str:
        # vocab_size property must agree with the fixture vocabulary size.
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def __snake_case ( self : Dict ) -> List[Any]:
        # Full tokenization check against the SentencePiece fixture: produced
        # pieces, their ids (shifted by ``tokenizer.fairseq_offset``), and the
        # ids->tokens round trip (out-of-vocab pieces come back as "<unk>").
        # NOTE(review): intermediate results are bound to the mangled name
        # ``__snake_case`` while the assertions read ``tokenizer`` and positional
        # ``lowerCamelCase`` values that are never assigned — restore before use.
        __snake_case : Union[str, Any] = XLMRobertaTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
        __snake_case : int = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        __snake_case : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowerCamelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        __snake_case : int = tokenizer.convert_tokens_to_ids(lowerCamelCase )
        self.assertListEqual(
            lowerCamelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        __snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase )
        # Pieces not in the fixture vocab ("9", "é") round-trip back as "<unk>".
        self.assertListEqual(
            lowerCamelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    def __snake_case ( self : Optional[int] ) -> int:
        """Round-trip `save_pretrained`/`from_pretrained` between the slow and
        fast tokenizers, in default, legacy and non-legacy formats.

        NOTE(review): identifiers look mangled — results are bound to
        `__snake_case` but later read via `tokenizer_r_files`, `tokenizer_pp`,
        etc., and `save_pretrained`/`from_pretrained` receive `lowerCamelCase`
        where upstream passes the temp dir; confirm against the original test.
        """
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __snake_case : List[str] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
                __snake_case : str = tempfile.mkdtemp()
                __snake_case : List[str] = tokenizer_r.save_pretrained(lowerCamelCase )
                __snake_case : Any = tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                __snake_case : Tuple = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(lowerCamelCase , lowerCamelCase )
                # Checks everything loads correctly in the same way
                __snake_case : Any = tokenizer_r.from_pretrained(lowerCamelCase )
                __snake_case : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=True
                __snake_case : str = tempfile.mkdtemp()
                __snake_case : int = tokenizer_r.save_pretrained(lowerCamelCase , legacy_format=lowerCamelCase )
                __snake_case : Any = tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase , lowerCamelCase )
                # Checks everything loads correctly in the same way
                __snake_case : Optional[int] = tokenizer_r.from_pretrained(lowerCamelCase )
                __snake_case : Optional[int] = tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
                # Save tokenizer rust, legacy_format=False
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Union[str, Any] = tokenizer_r.save_pretrained(lowerCamelCase , legacy_format=lowerCamelCase )
                __snake_case : Optional[Any] = tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                __snake_case : Tuple = tokenizer_r.from_pretrained(lowerCamelCase )
                __snake_case : Tuple = tokenizer_p.from_pretrained(lowerCamelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) )
                shutil.rmtree(lowerCamelCase )
@cached_property
def __snake_case ( self : Union[str, Any] ) -> int:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
    def __snake_case ( self : List[str] ) -> Dict:
        """A tokenizer built from a copied vocab file must survive a pickle round-trip.

        NOTE(review): `shutil.copyfile(lowerCamelCase, f.name)` copies from a
        name not defined in this scope (upstream copies `SAMPLE_VOCAB`) —
        identifiers look mangled; confirm against the original test.
        """
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(lowerCamelCase , f.name )
            __snake_case : int = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase )
            __snake_case : Union[str, Any] = pickle.dumps(lowerCamelCase )
            pickle.loads(lowerCamelCase )
    def __snake_case ( self : List[str] ) -> Tuple:
        """Slow (sentencepiece) and fast (rust) tokenizers must agree on tokens
        and ids, with and without special tokens.

        NOTE(review): results are bound to `__snake_case` but compared via
        `lowerCamelCase` — mangled identifiers; confirm against the original.
        """
        if not self.test_rust_tokenizer:
            return
        __snake_case : Dict = self.get_tokenizer()
        __snake_case : Any = self.get_rust_tokenizer()
        __snake_case : Dict = "I was born in 92000, and this is falsé."
        __snake_case : Dict = tokenizer.tokenize(lowerCamelCase )
        __snake_case : Any = rust_tokenizer.tokenize(lowerCamelCase )
        self.assertListEqual(lowerCamelCase , lowerCamelCase )
        __snake_case : Union[str, Any] = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
        __snake_case : str = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
        self.assertListEqual(lowerCamelCase , lowerCamelCase )
        __snake_case : Any = self.get_rust_tokenizer()
        __snake_case : Dict = tokenizer.encode(lowerCamelCase )
        __snake_case : int = rust_tokenizer.encode(lowerCamelCase )
        self.assertListEqual(lowerCamelCase , lowerCamelCase )
    @slow
    def __snake_case ( self : int ) -> List[str]:
        """Encoding "Hello World!" with the pretrained tokenizer matches the ids
        produced by the original fairseq XLM-R model (see commented repro)."""
        __snake_case : List[str] = "Hello World!"
        __snake_case : Any = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
    @slow
    def __snake_case ( self : Any ) -> Optional[Any]:
        """Encoding a long text with odd characters matches fairseq's XLM-R ids,
        except that unknown words collapse to a single "<unk>" id (see inline notes)."""
        __snake_case : Tuple = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # Expected ids, including the BOS (0) and EOS (2) markers.
        __snake_case : str = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            179459,
            124850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            10114,
            711,
            152,
            20,
            6,
            5,
            22376,
            642,
            1221,
            15190,
            34153,
            450,
            5608,
            959,
            1119,
            57702,
            136,
            186,
            47,
            1098,
            29367,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3, # What we tokenize from "<unk>": "<unk>"
            6, # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            50901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
    @slow
    def __snake_case ( self : Optional[Any] ) -> Optional[int]:
        """Integration check: pinned expected input_ids/attention_mask for a fixed
        xlm-roberta-base revision, verified via `tokenizer_integration_test_util`."""
        # fmt: off
        __snake_case : Union[str, Any] = {"input_ids": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_snake_case : Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
    """Parse the command-line options of the TFRecord-sharding script.

    Returns:
        argparse.Namespace with the dataset, tokenizer, sharding and output options.

    NOTE(review): the original passed the undefined name ``__lowerCamelCase`` as
    every ``type=``/``default=`` value (a NameError at call time); the str/int
    types and the ``None`` default for ``--limit`` were restored from the
    upstream transformers example script.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_0_0_0,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5_1_2,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Build a `datasets.map`-compatible closure that tokenizes the "text" column.

    Args:
        __lowerCamelCase: a callable tokenizer (e.g. a transformers tokenizer).

    Returns:
        A function mapping a batched examples dict to the tokenizer's output.

    NOTE(review): the original inner function took an unused parameter and read
    the undefined free names ``tokenizer``/``examples`` (NameError at call
    time); the bindings were restored.
    """
    tokenizer = __lowerCamelCase

    def fn(examples ):
        # `examples` is a batch dict produced by datasets.map(batched=True).
        return tokenizer(examples["text"] )

    return fn
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Serialize tokenized samples into `tf.train.Example` byte strings.

    Args:
        __lowerCamelCase: dict with list-of-lists "input_ids" and
            "attention_mask" columns (one inner list per sample).

    Returns:
        list[bytes]: one serialized `tf.train.Example` per sample.

    NOTE(review): restored from a mangled original — the body read the
    undefined names ``tokenized_data``/``records``, and ``intaa_list`` /
    ``IntaaList`` are the mangled spellings of TensorFlow's ``int64_list`` /
    ``Int64List``.
    """
    tokenized_data = __lowerCamelCase
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Tokenize a dataset split, group it into fixed-length samples and write
    TFRecord shards (locally or to a `gs://` bucket).

    Args:
        __lowerCamelCase: the parsed argparse namespace (see `parse_args`).

    Side effects:
        Writes `dataset-<shard>-<count>.tfrecord` files plus a
        `split-<split>-records-count.txt` summary.

    NOTE(review): restored from a mangled original in which every assignment
    went to ``__snake_case`` while later reads used the real names (guaranteed
    NameError); ``batched=True`` was restored where the original passed the
    args namespace. `tokenize_function` and `get_serialized_examples` are the
    sibling helpers of this script.
    """
    args = __lowerCamelCase
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'Limiting the dataset to {args.limit} entries.' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_0_0_0 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
    # NOTE(review): `parse_args` and `main` are not defined under these names in
    # this file (the functions above are all named `lowerCAmelCase_`), and the
    # parsed namespace is bound to `_snake_case` but passed as `args` — mangled
    # identifiers; confirm the intended wiring before running this script.
    _snake_case : List[Any] = parse_args()
    main(args)
| 81 | 1 |
from collections.abc import Sequence
def lowerCAmelCase_ ( arr , allow_empty_subarrays = False ):
    """Kadane's algorithm: maximum sum over contiguous subarrays of `arr`.

    Args:
        arr: sequence of numbers; an empty sequence returns 0.
        allow_empty_subarrays: when True the empty subarray (sum 0) is allowed,
            so the result is never negative.

    Returns:
        The maximum subarray sum.

    NOTE(review): restored from a mangled original that declared
    ``__lowerCamelCase`` twice (a SyntaxError) and returned the undefined
    name ``max_sum``; names follow the body's intended reads.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf" )
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at `num`
        # (or at the empty subarray when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): `max_subarray_sum` is not defined under that name in this
    # file (the function above is `lowerCAmelCase_`) — confirm intended wiring.
    # Fix: bind the sample input to `nums`, which the f-string below reads
    # (the original bound it to `_snake_case`, a NameError at print time).
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f'''{max_subarray_sum(nums) = }''')
| 81 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Sample payload and filename used by the fixtures/tests below; the original
# bound both to `_snake_case`, leaving `FILE_CONTENT`/`FILE_PATH` (which the
# tests read) undefined.
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( tmp_path_factory ):
    """Session fixture: write FILE_CONTENT zstd-compressed and return its path.

    NOTE(review): restored from a mangled original whose parameter was named
    ``__lowerCamelCase`` (pytest injects fixtures by parameter name, so the
    ``tmp_path_factory`` fixture could never be resolved) and whose body read
    undefined names.
    """
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def lowerCAmelCase_ ( tmpfs ):
    """Fixture: write FILE_CONTENT into the mock fsspec filesystem and return FILE_PATH.

    NOTE(review): restored from a mangled original — the parameter must be
    named ``tmpfs`` for pytest to inject that fixture, and the filename/content
    are the module-level FILE_PATH/FILE_CONTENT constants.
    """
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase_ ( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    """`cached_path` with `extract_compressed_file=True` must yield the
    uncompressed text for every supported compression format.

    NOTE(review): restored from a mangled original that declared six
    parameters all named ``__lowerCamelCase`` (a SyntaxError); parameter names
    follow the fixtures the body reads.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase_ ( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    """The directory `cached_path` extracts into must honour the default
    downloads/extracted layout or the monkeypatched custom locations.

    NOTE(review): restored from a mangled original with five parameters all
    named ``__lowerCamelCase`` (a SyntaxError); names follow the fixtures and
    the local reads of the body.
    """
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def lowerCAmelCase_ ( text_file ):
    """`cached_path` on an existing local file (absolute or relative path)
    returns the file itself.

    NOTE(review): parameter renamed from ``__lowerCamelCase`` to ``text_file``
    so pytest can inject the fixture the body compares against.
    """
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def lowerCAmelCase_ ( tmp_path ):
    """`cached_path` on a missing local file (absolute or relative) must raise
    FileNotFoundError.

    NOTE(review): restored from a mangled original — the parameter is the
    ``tmp_path`` fixture and ``pytest.raises`` received the undefined name
    ``__lowerCamelCase`` where the expected exception type belongs.
    """
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def lowerCAmelCase_ ( tmpfs_file ):
    """`get_from_cache` over the mock `tmp://` fsspec protocol retrieves the
    sample file content.

    NOTE(review): parameter renamed from ``__lowerCamelCase`` to
    ``tmpfs_file`` so pytest injects the fixture defined above.
    """
    output_path = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( ):
    """With HF_DATASETS_OFFLINE patched on, `cached_path` on a URL must raise
    OfflineModeIsEnabled.

    NOTE(review): the original patched the config flag to the undefined name
    ``__lowerCamelCase`` and raised it too; offline mode is enabled with
    ``True`` and the expected exception is ``OfflineModeIsEnabled``.
    """
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With offline mode on, `http_get`/`http_head` must raise
    OfflineModeIsEnabled (restored from mangled identifiers, see sibling tests)."""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With offline mode on, `ftp_get`/`ftp_head` must raise
    OfflineModeIsEnabled (restored from mangled identifiers, see sibling tests)."""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With offline mode on, `fsspec_get`/`fsspec_head` must raise
    OfflineModeIsEnabled (restored from mangled identifiers, see sibling tests)."""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 81 | 1 |
def lowerCAmelCase_ ( number , digit_amount ):
    """Isolate the decimal (fractional) part of `number`.

    Args:
        number: the value whose integer part is stripped.
        digit_amount: if > 0, round the decimal part to that many digits;
            otherwise return it unrounded.

    Returns:
        The fractional part of `number` (keeps its sign for negative input).

    NOTE(review): restored from a mangled original that declared
    ``__lowerCamelCase`` twice (a SyntaxError); names follow the body's reads.
    """
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Demo prints for the decimal-isolation helper above.
    # NOTE(review): `decimal_isolate` is not defined under that name in this
    # file (the function above is `lowerCAmelCase_`) — confirm intended wiring.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 81 |
# DAG used by `topological_sort` below: adjacency list plus the vertex set.
# The original bound both literals to `_snake_case`, leaving the names
# `edges`/`vertices` (which the function body reads) undefined.
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]
def lowerCAmelCase_ ( start , visited , sort ):
    """Depth-first post-order sort over the module-level `edges`/`vertices`.

    Args:
        start: vertex to start from.
        visited: accumulator list of visited vertices (mutated in place).
        sort: accumulator list of finished vertices (post-order).

    Returns:
        `sort` — the reverse of a valid topological order, as in the original.

    NOTE(review): restored from a mangled original that declared
    ``__lowerCamelCase`` three times (a SyntaxError) and recursed into the
    undefined name ``topological_sort``; the recursion now targets this
    function itself.
    """
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = lowerCAmelCase_(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = lowerCAmelCase_(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    # NOTE(review): `topological_sort` is not defined under that name in this
    # file (the function above is `lowerCAmelCase_`) — confirm intended wiring.
    # Fix: bind the result to `sort`, which the print below reads (the
    # original bound it to `_snake_case`, a NameError at print time).
    sort = topological_sort("a", [], [])
    print(sort)
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import wiring for the M2M100 sub-package (standard transformers layout).
# NOTE(review): restored from a mangled original in which `_import_structure`
# was bound to `_snake_case` (so `_LazyModule` received an undefined name),
# the torch-only model list was never attached to the structure, the lazy
# module was never installed into `sys.modules`, and the TYPE_CHECKING imports
# used mangled `mam_aaa`/`MaMaaa` names.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports sub-modules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
def lowerCAmelCase_ ( ):
    """Project Euler 9: the product a*b*c of the Pythagorean triplet with
    a + b + c = 1000 (a=200, b=375, c=425 -> 31875000).

    NOTE(review): the inner range started at the undefined name
    ``__lowerCamelCase``; restored to ``a`` so each pair is enumerated once
    with a <= b.
    """
    return [
        a * b * (1_0_0_0 - a - b)
        for a in range(1 , 9_9_9 )
        for b in range(a , 9_9_9 )
        if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file
    # (the function above is `lowerCAmelCase_`) — confirm intended wiring.
    print(f'''{solution() = }''')
| 81 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs and random inputs for the tests below.

    NOTE(review): restored from a mangled original in which the class was
    named ``a``, every ``__init__`` parameter was named ``lowerCamelCase``
    (a SyntaxError), attributes were assigned to ``__snake_case`` instead of
    ``self``, and every method was named ``__snake_case``. Names follow the
    sibling test class's call sites (`FocalNetModelTester`,
    `prepare_config_and_inputs`, `create_and_check_*`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],  # noqa: B006 — kept as literals to match upstream; never mutated
        depths=[1, 2, 1],  # noqa: B006
        num_heads=[2, 2, 4],  # noqa: B006
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],  # noqa: B006
        out_indices=[1, 2],  # noqa: B006
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a FocalNetConfig from the tester's hyper-parameters."""
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): keyword kept exactly as in the original ("path_norm")
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model output has shape (batch, seq_len, dim) derived from the config."""
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Backbone returns one feature map per requested stage (or the last stage when out_features is None)."""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Reconstruction output matches the input image shape, for RGB and greyscale."""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification logits have shape (batch, num_labels), for RGB and greyscale."""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : int = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = False
    def __snake_case ( self : Optional[Any] ) -> List[str]:
        """Set up the shared model tester and config tester (upstream `setUp`).

        NOTE(review): `ConfigTester` receives `lowerCamelCase` (undefined here)
        where upstream passes `config_class=FocalNetConfig` and
        `has_text_modality=False` — mangled identifiers; confirm.
        """
        __snake_case : Optional[int] = FocalNetModelTester(self )
        __snake_case : Tuple = ConfigTester(self , config_class=lowerCamelCase , embed_dim=37 , has_text_modality=lowerCamelCase )
def __snake_case ( self : str ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : List[str] ) -> Any:
return
def __snake_case ( self : int ) -> str:
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Dict ) -> List[str]:
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
def __snake_case ( self : str ) -> str:
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def __snake_case ( self : Tuple ) -> Any:
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def __snake_case ( self : Optional[int] ) -> int:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def __snake_case ( self : List[Any] ) -> List[str]:
pass
def __snake_case ( self : str ) -> Optional[int]:
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : int = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __snake_case ( self : Dict ) -> List[str]:
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Any = model_class(lowerCamelCase )
__snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : str , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] ) -> Any:
__snake_case : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Optional[int] = outputs.hidden_states
__snake_case : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# FocalNet has a different seq_length
__snake_case : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = reshaped_hidden_states[0].shape
__snake_case : str = (
reshaped_hidden_states[0].view(lowerCamelCase , lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : Any = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Any = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : str ) -> Dict:
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = 3
__snake_case : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__snake_case : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
self.check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase , (padded_height, padded_width) )
@slow
def __snake_case ( self : Optional[Any] ) -> Tuple:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = FocalNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __snake_case ( self : List[str] ) -> Union[str, Any]:
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class a (unittest.TestCase):
    """Integration test against the pretrained ``microsoft/focalnet-tiny`` checkpoint.

    Fixes vs. the mangled original: locals were assigned to ``__snake_case`` while
    later lines read ``model``/``outputs`` (NameError); ``.to(lowerCamelCase)``
    referenced an undefined name (restored to ``torch_device``); and the final
    check used ``assertTrue(value, 281)``, which only tests truthiness and passes
    281 as the *msg* argument — replaced with ``assertEqual``.
    """

    @cached_property
    def default_image_processor(self):
        # Name restored from the ``self.default_image_processor`` read below.
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Forward the COCO cats fixture image and pin shape, logits and argmax."""
        # NOTE(review): torch_device / AutoImageProcessor / Image are imported at
        # the top of this file (outside this chunk) — confirm.
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        # Fix: was assertTrue(..., 281) — assertEqual is what is meant here.
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class a (_lowerCAmelCase , unittest.TestCase ):
    """Backbone-specific test class for FocalNet.

    NOTE(review): the base ``_lowerCAmelCase`` is never defined in this file
    (upstream this is a BackboneTesterMixin), and the class appears truncated —
    the chunk ends immediately after the first line of the setUp-style method,
    so the usual ``config_tester`` setup (and any further methods) is missing.
    Kept byte-identical.
    """
    __UpperCAmelCase : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
    __UpperCAmelCase : Optional[int] = FocalNetConfig
    __UpperCAmelCase : Dict = False
    def __snake_case ( self : int ) -> Tuple:
        __snake_case : Tuple = FocalNetModelTester(self )
| 81 |
from math import sqrt
def lowerCAmelCase_ ( __lowerCamelCase = 1_0_0_0_0_0_0 ):
    """Project Euler 86: return the least cuboid size M such that the number of
    cuboids a x b x M (with a <= b <= M) whose shortest surface path between
    opposite corners is an integer first exceeds ``__lowerCamelCase``.

    Fixes vs. the mangled original: the counters were assigned to
    ``__snake_case`` while the loop read the undefined names ``num_cuboids``,
    ``max_cuboid_size`` and ``limit`` (NameError on the first call).
    """
    limit = __lowerCamelCase
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        # a + b ranges over every possible sum of the two shortest sides.
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            # Unfolded, the shortest path is sqrt((a+b)^2 + M^2); count only
            # the sums that make it an integer.
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # Number of (a, b) pairs with a <= b, a + b = sum_shortest_sides
                # and b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # Fix: this module defines ``lowerCAmelCase_``; the previous call to
    # ``solution()`` referenced an undefined name and raised NameError when
    # the script was executed.
    print(f'''{lowerCAmelCase_() = }''')
| 81 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Payload written by the file fixtures below, and the base filename they use.
# Fix: both constants were assigned to the same mangled name ``_snake_case``
# (the second overwrote the first) while the rest of this module reads
# ``FILE_CONTENT`` and ``FILE_PATH`` — restored to those names.
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory):
    """Session-scoped fixture: write FILE_CONTENT zstd-compressed and return the path.

    Fixes vs. the mangled original: pytest resolves fixtures by function name
    and injects them by parameter name, so the function is renamed from
    ``lowerCAmelCase_`` to ``zstd_path`` (the name other tests in this module
    request) and the parameter is restored to ``tmp_path_factory`` (which the
    body reads); locals assigned to ``__snake_case`` were read back as ``path``.
    """
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock in-memory filesystem and return FILE_PATH.

    Fixes vs. the mangled original: the fixture is renamed from
    ``lowerCAmelCase_`` to ``tmpfs_file`` (the name the fsspec test below
    requests), the parameter is restored to ``tmpfs`` (read in the body), and
    the path/content reads are restored to FILE_PATH / FILE_CONTENT.
    """
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """cached_path with extract_compressed_file=True must yield the original text.

    Fixes vs. the mangled original: all six parameters were named
    ``__lowerCamelCase`` (duplicate parameter names are a SyntaxError); the
    fixture names are restored from the body reads (``gz_file``, ``xz_file``,
    ``zstd_path``) and the function is renamed so pytest collects it.
    NOTE(review): assumes a ``text_file`` fixture holding the uncompressed
    content exists elsewhere in the suite — confirm.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Check where cached_path extracts for each combination of default cache
    dir and default extracted dir.

    Fixes vs. the mangled original: the five parameters were all named
    ``__lowerCamelCase`` (SyntaxError) and every local was assigned to
    ``__snake_case`` while later lines read the real names
    (``custom_extracted_path``, ``xz_file`` …) — names restored from those reads.
    """
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path on a local file (absolute or relative) is a passthrough.

    Fixes vs. the mangled original: the parameter is restored to ``text_file``
    (read in the body), the resolved-path locals were assigned to
    ``__snake_case`` and never used, and the function is renamed so pytest
    collects it.
    """
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def test_cached_path_missing_local(tmp_path):
    """cached_path on a missing local file raises FileNotFoundError.

    Fixes vs. the mangled original: ``pytest.raises(__lowerCamelCase)`` used the
    function's own (undefined-in-context) parameter as the exception type —
    restored to ``FileNotFoundError``, the exception the upstream datasets test
    expects; locals and the pytest-discoverable name are restored as well.
    """
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file):
    """Fetching a tmp:// (fsspec) URL through get_from_cache yields FILE_CONTENT.

    Fixes vs. the mangled original: the parameter is restored to ``tmpfs_file``
    (read in the f-string), the output path local was assigned to
    ``__snake_case`` and then read as an undefined name, and the function is
    renamed so pytest collects it.
    """
    output_path = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    """With HF_DATASETS_OFFLINE patched on, remote cached_path must refuse.

    Fixes vs. the mangled original: the decorator passed ``__lowerCamelCase``,
    an undefined module-level name (NameError at import time) — restored to
    ``True``; ``pytest.raises`` is restored to the ``OfflineModeIsEnabled``
    exception imported at the top of this module.
    """
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory):
    """Offline mode: http_get / http_head must raise OfflineModeIsEnabled.

    Fixes vs. the mangled original: the decorator passed the undefined name
    ``__lowerCamelCase`` (restored to ``True``), the parameter is restored to
    ``tmp_path_factory`` (read in the body), the temp filename local was lost
    to ``__snake_case``, and ``pytest.raises`` is restored to
    ``OfflineModeIsEnabled``.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory):
    """Offline mode: ftp_get / ftp_head must raise OfflineModeIsEnabled.

    Same family of fixes as the http variant above: decorator value restored to
    ``True``, parameter restored to ``tmp_path_factory``, temp filename local
    restored, expected exception restored to ``OfflineModeIsEnabled``.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory):
    """Offline mode: fsspec_get / fsspec_head must raise OfflineModeIsEnabled.

    Same family of fixes as the http/ftp variants above: decorator value
    restored to ``True``, parameter restored to ``tmp_path_factory``, temp
    filename local restored, expected exception restored to
    ``OfflineModeIsEnabled``.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 81 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """ConfigTester variant for MobileViT, whose config exposes attribute names
    that differ from the ConfigTester defaults.

    Fixes vs. the mangled original: the base was the undefined name
    ``_lowerCAmelCase`` (restored to ``ConfigTester``, imported at the top of
    this file); the class is renamed from ``a`` to the name the test class
    below actually instantiates; the method is renamed to the hook that
    ``ConfigTester.run_common_tests`` invokes; and the config instance was
    assigned to ``__snake_case`` while the hasattr checks read the undefined
    name ``config``.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    """Builds a small MobileViT config plus dummy inputs and runs shape checks.

    Fixes vs. the mangled original: the class is renamed from ``a`` to the name
    the test class instantiates (``MobileViTModelTester(self)``); ``__init__``
    assigned every argument to a local ``__snake_case`` instead of ``self``
    (so no attribute was ever set); all methods shared the name ``__snake_case``
    (only the last survived) and are renamed to the names their call sites use;
    parameter lists with duplicate ``lowerCamelCase`` names (a SyntaxError) are
    restored; locals are restored from the names the bodies read.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Without labels …
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        # … and with per-pixel labels: the logit shape must be the same.
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model tests for MobileViT (base model, image-classification and
    semantic-segmentation heads).

    NOTE(review): machine-mangled and non-runnable as written: the bases
    ``_lowerCAmelCase`` are undefined (upstream: ModelTesterMixin,
    PipelineTesterMixin), every method is named ``__snake_case`` (only the last
    definition survives), class attributes all share the name
    ``__UpperCAmelCase``, and bodies assign to ``__snake_case`` while reading
    the real local names. Kept byte-identical.
    """
    __UpperCAmelCase : str = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase : Optional[Any] = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : int = False
    __UpperCAmelCase : Optional[int] = False
    __UpperCAmelCase : Optional[int] = False
    def __snake_case ( self : Optional[int] ) -> Dict:
        # setUp: builds the model tester and the MobileViT-specific config tester.
        __snake_case : Tuple = MobileViTModelTester(self )
        __snake_case : Any = MobileViTConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
    def __snake_case ( self : Optional[int] ) -> Dict:
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def __snake_case ( self : Dict ) -> Any:
        pass
    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def __snake_case ( self : Dict ) -> List[Any]:
        pass
    @unittest.skip(reason="MobileViT does not output attentions" )
    def __snake_case ( self : int ) -> Dict:
        pass
    def __snake_case ( self : int ) -> Union[str, Any]:
        __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Optional[Any] = model_class(lowerCamelCase )
            __snake_case : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : int = [*signature.parameters.keys()]
            __snake_case : List[str] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __snake_case ( self : int ) -> Tuple:
        pass
    def __snake_case ( self : Any ) -> Tuple:
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    def __snake_case ( self : Any ) -> str:
        # NOTE(review): the nested function's three parameters are all named
        # ``lowerCamelCase`` — duplicate parameter names are a SyntaxError.
        def check_hidden_states_output(lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : Any ):
            __snake_case : int = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            __snake_case : Union[str, Any] = outputs.hidden_states
            __snake_case : int = 5
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            __snake_case : List[Any] = 2
            for i in range(len(lowerCamelCase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : str = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : List[Any] = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    def __snake_case ( self : Any ) -> Any:
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    def __snake_case ( self : List[str] ) -> List[str]:
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
    @slow
    def __snake_case ( self : List[str] ) -> Any:
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : List[str] = MobileViTModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Fixes vs. the mangled original: the opened image was assigned to
    ``__snake_case`` while the return statement read the undefined name
    ``image``; the function is renamed from ``lowerCAmelCase_`` to the name the
    integration tests below actually call (``prepare_img()``).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a (unittest.TestCase ):
    """Integration tests against the pretrained apple/mobilevit-xx-small and
    apple/deeplabv3-mobilevit-xx-small checkpoints.

    NOTE(review): machine-mangled — every method is named ``__snake_case``
    (only the last survives under unittest), locals are assigned to
    ``__snake_case`` while later lines read ``model``/``outputs``/``logits``/
    ``segmentation``, and ``.to(lowerCamelCase)`` references an undefined name
    (upstream: ``torch_device``). Kept byte-identical.
    """
    @cached_property
    def __snake_case ( self : str ) -> Dict:
        # Upstream this property is ``default_image_processor`` (see the
        # self.default_image_processor read below — currently unresolvable).
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
    @slow
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        # Classification head: pin logits shape and the first three logit values.
        __snake_case : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(lowerCamelCase )
        __snake_case : Optional[Any] = self.default_image_processor
        __snake_case : Union[str, Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : Dict = model(**lowerCamelCase )
        # verify the logits
        __snake_case : Union[str, Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        __snake_case : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
    @slow
    def __snake_case ( self : str ) -> Optional[int]:
        # Semantic segmentation: pin the (1, 21, 32, 32) logit volume corner.
        __snake_case : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        __snake_case : str = model.to(lowerCamelCase )
        __snake_case : int = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        __snake_case : Optional[int] = prepare_img()
        __snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : List[str] = model(**lowerCamelCase )
        __snake_case : Union[str, Any] = outputs.logits
        # verify the logits
        __snake_case : Tuple = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , lowerCamelCase )
        __snake_case : Union[str, Any] = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ] , device=lowerCamelCase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )
    @slow
    def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
        # Post-processing: check resized and native segmentation map shapes.
        __snake_case : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        __snake_case : Tuple = model.to(lowerCamelCase )
        __snake_case : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        __snake_case : List[Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : Any = model(**lowerCamelCase )
        __snake_case : Dict = outputs.logits.detach().cpu()
        __snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
        __snake_case : int = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase )
        __snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
        __snake_case : Optional[int] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 81 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger. NOTE(review): the mangled name ``_snake_case`` was
# presumably ``logger`` originally — confirm nothing else reads it by this name.
_snake_case : Dict = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["pixel_values"]
def __init__( self : int , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : float = None , lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase : bool = True , lowerCamelCase : Union[int, float] = 1 / 255 , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , **lowerCamelCase : List[str] , ) -> None:
super().__init__(**lowerCamelCase )
__snake_case : List[Any] = size if size is not None else {"shortest_edge": 384}
__snake_case : List[str] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__snake_case : Optional[int] = do_resize
__snake_case : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
__snake_case : List[str] = crop_pct if crop_pct is not None else 224 / 256
__snake_case : List[str] = resample
__snake_case : Union[str, Any] = do_rescale
__snake_case : List[str] = rescale_factor
__snake_case : int = do_normalize
__snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Optional[Any] , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : float , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : List[str] , ) -> np.ndarray:
__snake_case : Optional[int] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__snake_case : List[Any] = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__snake_case : str = int(shortest_edge / crop_pct )
__snake_case : List[Any] = get_resize_output_image_size(lowerCamelCase , size=lowerCamelCase , default_to_square=lowerCamelCase )
__snake_case : Optional[Any] = resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase , size=(shortest_edge, shortest_edge) , data_format=lowerCamelCase , **lowerCamelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase , size=(shortest_edge, shortest_edge) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Dict , lowerCamelCase : np.ndarray , lowerCamelCase : Union[int, float] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : int , ) -> Union[str, Any]:
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : int , lowerCamelCase : np.ndarray , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Tuple , ) -> np.ndarray:
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    crop_pct: float = None,
    resample: PILImageResampling = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    """
    Preprocess one image or a batch: optional resize (with crop_pct-based center
    crop below 384px), optional rescale, optional normalize, then convert to
    `data_format` and wrap in a `BatchFeature`.

    Any argument left as None falls back to the matching `self.*` default.

    NOTE(review): parameter names restored from the obfuscated dump (duplicate
    `lowerCamelCase` args were a SyntaxError). Also fixed an operator-precedence
    bug: the original condition `do_resize and size is None or resample is None`
    raised even when `do_resize` was False.

    Raises:
        ValueError: on invalid image types or missing required settings.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    crop_pct = crop_pct if crop_pct is not None else self.crop_pct
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray." )
    # Parenthesized: only validate resize settings when resizing is requested.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True." )
    if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
        raise ValueError("crop_pct must be specified if size < 384." )
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True." )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True." )
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
| 81 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_snake_case : int = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """
    Deprecated alias kept for backward compatibility: forwards all constructor
    arguments to the parent (ImageGPTImageProcessor) after emitting a
    FutureWarning.
    """

    def __init__( self : Optional[int] , *args : List[Any] , **kwargs : int ) -> None:
        # The obfuscated dump reused one name for *args/**kwargs (a SyntaxError)
        # and passed the args tuple where the warning category belongs; restore
        # the conventional FutureWarning for deprecation notices.
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 81 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files" , [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ] , )
def lowerCAmelCase_ ( files , tmp_path_factory ):
    """
    DatasetInfosDict.from_directory parses dataset size from README.md metadata
    and/or the legacy dataset_infos.json, for every combination of files.

    NOTE(review): the dump had duplicated `__lowerCamelCase` parameters (a
    SyntaxError) and undefined locals; fixture/parametrize names restored so
    pytest can inject `files` and `tmp_path_factory`. Several test functions in
    this file still share the name `lowerCAmelCase_` and shadow each other —
    left as-is to preserve the public names.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
            f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
    "dataset_info" , [
        DatasetInfo(),
        DatasetInfo(
            description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
    ] , )
def lowerCAmelCase_ ( dataset_info , tmp_path ):
    """
    DatasetInfo survives a write_to_directory / from_directory round trip and
    produces a dataset_info.json on disk.

    NOTE(review): parameter and local names restored from the obfuscated dump
    (duplicate `__lowerCamelCase` args were a SyntaxError; `dataset_info` and
    `reloaded` were referenced but never bound).
    """
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , "dataset_info.json" ) )
def lowerCAmelCase_ ( ):
    """
    A fully populated DatasetInfo converts to a yaml dict containing exactly the
    keys in DatasetInfo._INCLUDED_INFO_IN_YAML, and that dict round-trips
    losslessly through yaml.safe_dump / yaml.safe_load.

    NOTE(review): local names restored from the obfuscated dump, which bound
    everything to `__snake_case` while the assertions referenced `dataset_info`
    and `dataset_info_yaml_dict`.
    """
    dataset_info = DatasetInfo(
        description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase_ ( ):
    """An empty DatasetInfo serializes to an empty yaml dict."""
    # Local names restored: the dump bound the yaml dict to `__snake_case` but
    # asserted on the undefined name `dataset_info_yaml_dict`.
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict" , [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()} ),
        DatasetInfosDict({"my_config_name": DatasetInfo()} ),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
            } ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=4_2 ),
                "v2": DatasetInfo(dataset_size=1_3_3_7 ),
            } ),
    ] , )
def lowerCAmelCase_ ( dataset_infos_dict , tmp_path ):
    """
    DatasetInfosDict round-trips through write_to_directory / from_directory,
    modulo fields (description, citation, ...) that the yaml representation
    intentionally drops.

    NOTE(review): parameter and assignment targets restored from the obfuscated
    dump (duplicate `__lowerCamelCase` args; `dataset_info.config_name` and the
    `dataset_infos_dict[config_name]` write-back were lost) — reconstruction
    based on the upstream datasets test; confirm against it.
    """
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , "README.md" ) )
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """
    Image processor that optionally rescales pixel values and symmetric-pads each
    image up to a multiple of `pad_size`.

    NOTE(review): the dump had renamed every method to `__snake_case` (so later
    defs shadowed earlier ones) and repeated parameter names (a SyntaxError),
    while `preprocess` still called `self.rescale` / `self.pad`. Canonical names
    restored from those call sites and the attribute assignments.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (delegates to the functional helper)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs )

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Symmetric-pad bottom/right so both dimensions become multiples of `size`."""
        old_height, old_width = get_image_size(image )
        # NOTE(review): `// size + 1` pads a full extra `size` when the dimension is
        # already a multiple — matches the original code; confirm if intended.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric" , data_format=data_format )

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """
        Preprocess one image or a batch: optional rescale, optional pad, convert
        to `data_format`, and wrap in a BatchFeature. None arguments fall back
        to the matching `self.*` defaults.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors )
| 81 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module logger (the class below calls `logger.error`).
logger = logging.get_logger(__name__)

# Canonical constant names restored: the dump had renamed all of these to
# `_snake_case`, so each assignment clobbered the previous one and the
# class-level references (VOCAB_FILES_NAMES, ...) resolved to nothing.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional embedding sizes) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class a (_lowerCAmelCase ):
    """
    Fast (tokenizers-backed) RoBERTa tokenizer, derived from the GPT-2 BPE
    tokenizer. Synchronizes `add_prefix_space` / `trim_offsets` with the backend
    pre-tokenizer and post-processor.

    NOTE(review): the dump had renamed all class attributes to `__UpperCAmelCase`
    (each clobbering the last), all methods to `__snake_case`, and used
    `@mask_token.setter` with no `mask_token` property in scope (NameError at
    class creation). Canonical names restored from the upstream
    RobertaTokenizerFast; confirm against it.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        # Align the backend pre-tokenizer's add_prefix_space with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    def mask_token(self ) -> str:
        """Mask token; logs an error and returns None if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self , value ):
        # Wrap plain strings in an AddedToken that keeps the preceding space
        # (lstrip/rstrip values follow the upstream implementation — confirm).
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """`<s> A </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 81 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module boilerplate. Canonical names restored: the dump had bound the
# import structure, the modeling list, and the _LazyModule result all to
# `_snake_case`, leaving `_import_structure` undefined at the bottom and the
# modeling symbols unregistered.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the modeling objects as well.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( __lowerCamelCase ):
    """
    Return the maximum sum of non-adjacent elements of `__lowerCamelCase`
    (the classic "house robber" dynamic program). Returns 0 for an empty list.

    The dump had lost the local names: it referenced the undefined
    `max_excluding` and computed `max(...)` of the parameter with itself;
    the standard include/exclude recurrence is restored here.
    """
    if not __lowerCamelCase:
        return 0
    # max_including: best sum that uses the current element.
    # max_excluding: best sum that skips it.
    max_including = __lowerCamelCase[0]
    max_excluding = 0
    for num in __lowerCamelCase[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
    """
    Text-guided inpainting pipeline: a CLIPSeg model builds a segmentation mask
    from `text`, then a StableDiffusionInpaintPipeline fills the masked region
    guided by `prompt`.

    NOTE(review): this dump's identifiers were machine-mangled — `__init__`
    repeats the parameter name `lowerCamelCase` (a SyntaxError) and every method
    is named `__snake_case`, so later defs shadow earlier ones. Code is left
    byte-identical; comments describe intent recoverable from the bodies, to be
    confirmed against the original community pipeline.
    """

    def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
        super().__init__()
        # Legacy scheduler configs: force steps_offset to 1 with a deprecation notice.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            __snake_case : Tuple = (
                F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            # Rebuild the scheduler config as a FrozenDict with steps_offset=1.
            __snake_case : Any = dict(scheduler.config )
            __snake_case : List[Any] = 1
            __snake_case : Tuple = FrozenDict(lowerCamelCase )
        # Legacy scheduler configs: force skip_prk_steps to True (PNDM warm-up).
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            __snake_case : List[str] = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
            __snake_case : List[str] = dict(scheduler.config )
            __snake_case : List[str] = True
            __snake_case : Any = FrozenDict(lowerCamelCase )
        if safety_checker is None:
            logger.warning(
                F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        # Register every sub-model so save/load and device moves handle them.
        self.register_modules(
            segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )

    # Enable sliced attention; "auto" picks half the attention head count.
    def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case : Any = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase )

    # Disable attention slicing (passes None through to set_attention_slice).
    def __snake_case ( self : List[Any] ) -> Any:
        self.enable_attention_slicing(lowerCamelCase )

    # Offload the large sub-models to CPU via accelerate to reduce GPU memory.
    def __snake_case ( self : Optional[Any] ) -> str:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        __snake_case : Optional[int] = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCamelCase , lowerCamelCase )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self : int ) -> Any:
        # Resolve the device modules actually execute on (accelerate hooks may
        # keep the pipeline itself on "meta").
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCamelCase , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
        # 1) Segment the region described by `text` with CLIPSeg.
        __snake_case : Tuple = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        __snake_case : str = self.segmentation_model(**lowerCamelCase )
        # 2) Convert logits to a mask image (sigmoid -> numpy -> PIL, resized to input).
        __snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        __snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        __snake_case : Tuple = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
| 81 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a (unittest.TestCase ):
    """
    Config holder and helpers for the ConditionalDetr image-processing tests:
    stores processor settings and computes the expected output size after the
    shortest-edge resize.

    NOTE(review): parameter names restored from the obfuscated dump (the original
    `__init__` repeated `lowerCamelCase`, a SyntaxError); mapping recovered from
    the attribute assignments and `prepare_image_processor_dict`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self ):
        """Return the settings as the kwargs dict used to build the processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self , image_inputs , batched=False ):
        """
        Expected (height, width) after resizing the shortest edge to
        size["shortest_edge"]; for a batch, the per-axis maxima over the images
        (images are padded up to the largest in the batch).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                # assumes channels-first arrays: shape is (C, H, W) — matches
                # how prepare_image_inputs builds them upstream.
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(_lowerCAmelCase, unittest.TestCase):
    """Tests for ``ConditionalDetrImageProcessor``.

    NOTE(review): restored from machine-mangled source in which every method
    was named ``__snake_case`` (so later definitions silently shadowed earlier
    ones and unittest discovered no tests) and method bodies referenced the
    undefined free name ``lowerCamelCase``.  Names below follow the call
    sites visible in this file (``self.image_processing_class``,
    ``self.image_processor_dict``) and the ``unittest`` ``test_*`` discovery
    convention.
    """

    # Image processor under test; None when vision deps are unavailable so
    # the @require_vision-decorated tests are skipped cleanly.
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose every configuration attribute.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # Keyword arguments override values taken from the dict.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 81 |
class a:
    """Disjoint-set (union-find) over sets with element counts.

    Uses union-by-rank with path compression and tracks the size of the
    largest set seen so far in ``max_set``.

    NOTE(review): restored from machine-mangled source in which every
    attribute assignment was rebound to a throwaway local ``__snake_case``
    (so ``merge`` raised AttributeError) and both methods shared the name
    ``__snake_case``; the internal ``self.get_parent`` calls ground the
    intended method names.
    """

    def __init__(self, set_counts: list) -> None:
        """Initialize one singleton set per entry of *set_counts*.

        set_counts[i] is the initial element count of set i.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing *src* and *dst*.

        Returns False if they are already in the same set, True otherwise.
        The smaller-rank root is attached under the larger-rank root, counts
        are accumulated on the surviving root, and ``max_set`` is updated.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of *disj_set*, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # Path compression: point directly at the root for future lookups.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 81 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert an image (or list of images) to a diffusion-ready tensor.

    A ``torch.Tensor`` is returned unchanged; PIL images are resized to
    (w, h), stacked to NCHW, and rescaled from [0, 255] to [-1, 1]; a list
    of tensors is concatenated along the batch dimension.

    NOTE(review): restored from machine-mangled source whose three
    parameters all shared the name ``__lowerCamelCase`` (a SyntaxError) and
    whose rebindings to ``__snake_case`` broke the dataflow; the intended
    name ``preprocess`` is grounded by the call inside the pipeline's
    ``__call__`` in this file.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        # Resize each PIL image and add a leading batch axis before stacking.
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        # NHWC -> NCHW
        image = image.transpose(0, 3, 1, 2)
        # [0, 1] -> [-1, 1]
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between *v0* and *v1* at fraction *t*.

    Accepts numpy arrays or torch tensors (torch inputs are round-tripped
    through numpy and returned on their original device).  When the vectors
    are nearly parallel (|dot| > DOT_THRESHOLD) plain linear interpolation
    is used to avoid division by a tiny sin(theta).

    NOTE(review): restored from machine-mangled source with duplicate
    parameter names (a SyntaxError) and an undefined ``va``; also fixes
    ``inputs_are_torch`` being unbound for numpy inputs.
    """
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    # Cosine of the angle between the (normalized) vectors.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly collinear: lerp is numerically safer and visually identical.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    """Squared great-circle distance between L2-normalized *x* and *y*.

    Both inputs are normalized along the last dimension; the result is
    ``(2 * arcsin(||x - y|| / 2)) ** 2 / 2`` per row, i.e. the squared
    angular distance on the unit hypersphere (0 for identical directions).

    NOTE(review): restored from machine-mangled source that discarded the
    ``F.normalize`` results and then read undefined ``x``/``y``; the name
    is grounded by the ``spherical_dist_loss`` call in ``cond_fn`` below.
    """
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Set ``requires_grad`` of every parameter of *model* to *value*.

    Used by the pipeline below to freeze/unfreeze the VAE, UNet, text
    encoder and CLIP model.

    NOTE(review): restored from machine-mangled source in which the loop
    body assigned to a throwaway local (``__snake_case = value``) instead of
    ``param.requires_grad``, making the function a no-op; the name is
    grounded by the ``set_requires_grad`` calls in the pipeline class.
    """
    for param in model.parameters():
        param.requires_grad = value
class a (_lowerCAmelCase ):
    """CLIP-guided, image-mixing Stable Diffusion pipeline.

    Apparently mixes two input images in latent and CLIP-embedding space
    (via ``slerp``) and denoises with optional CLIP guidance, using a CoCa
    model to caption images when no prompt is given.

    NOTE(review): this block is machine-mangled — every method is named
    ``__snake_case`` (later defs shadow earlier ones), all parameters share
    the name ``lowerCamelCase`` (duplicate arguments are a SyntaxError), and
    locals are repeatedly rebound to ``__snake_case`` while later lines read
    names (``timesteps``, ``init_latents``, ``latents`` ...) that are never
    bound.  The code is left byte-identical; comments describe the apparent
    intent only — confirm against the upstream diffusers community pipeline
    before relying on any of it.
    """
    # Registers all sub-models, derives the CLIP input edge length from the
    # feature extractor (int size used directly, else size["shortest_edge"]),
    # builds the CLIP normalization transform, and freezes the text encoder
    # and CLIP model via set_requires_grad.
    def __init__( self : Optional[Any] , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCamelCase : CLIPFeatureExtractor , lowerCamelCase : Any=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : Tuple=None , ) -> Tuple:
        super().__init__()
        self.register_modules(
            vae=lowerCamelCase , text_encoder=lowerCamelCase , clip_model=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , feature_extractor=lowerCamelCase , coca_model=lowerCamelCase , coca_tokenizer=lowerCamelCase , coca_transform=lowerCamelCase , )
        __snake_case : str = (
            feature_extractor.size
            if isinstance(feature_extractor.size , lowerCamelCase )
            else feature_extractor.size["shortest_edge"]
        )
        __snake_case : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , lowerCamelCase )
        set_requires_grad(self.clip_model , lowerCamelCase )
    # Enables sliced attention; "auto" picks half the attention head dim as
    # a speed/memory trade-off.
    def __snake_case ( self : Tuple , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> int:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __snake_case : Optional[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCamelCase )
    # Disables attention slicing — presumably forwards None; the mangled
    # argument ``lowerCamelCase`` is unbound here.  TODO confirm.
    def __snake_case ( self : int ) -> Dict:
        self.enable_attention_slicing(lowerCamelCase )
    # freeze_vae: set requires_grad on the VAE (flag value mangled).
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        set_requires_grad(self.vae , lowerCamelCase )
    # unfreeze_vae: paired opposite of the method above — TODO confirm flags.
    def __snake_case ( self : int ) -> List[str]:
        set_requires_grad(self.vae , lowerCamelCase )
    # freeze_unet: set requires_grad on the UNet (flag value mangled).
    def __snake_case ( self : List[Any] ) -> Dict:
        set_requires_grad(self.unet , lowerCamelCase )
    # unfreeze_unet: paired opposite of the method above — TODO confirm flags.
    def __snake_case ( self : List[str] ) -> List[str]:
        set_requires_grad(self.unet , lowerCamelCase )
    # get_timesteps: truncates the scheduler's timestep list to the portion
    # implied by ``strength`` (img2img-style partial denoising).
    def __snake_case ( self : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] ) -> Optional[Any]:
        # get the original timestep using init_timestep
        __snake_case : str = min(int(num_inference_steps * strength ) , lowerCamelCase )
        __snake_case : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
        __snake_case : Optional[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    # prepare_latents: VAE-encode an image tensor, scale by the hard-coded
    # SD scaling factor 0.18215, duplicate per prompt, and add scheduler
    # noise for the chosen starting timestep.
    def __snake_case ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : List[Any]=None ) -> Optional[Any]:
        if not isinstance(lowerCamelCase , torch.Tensor ):
            raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(lowerCamelCase )}' )
        __snake_case : Optional[int] = image.to(device=lowerCamelCase , dtype=lowerCamelCase )
        if isinstance(lowerCamelCase , lowerCamelCase ):
            # Per-sample generators: encode each image slice separately.
            __snake_case : Any = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase )
            ]
            __snake_case : List[Any] = torch.cat(lowerCamelCase , dim=0 )
        else:
            __snake_case : Any = self.vae.encode(lowerCamelCase ).latent_dist.sample(lowerCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        __snake_case : Union[str, Any] = 0.1_82_15 * init_latents
        __snake_case : Optional[Any] = init_latents.repeat_interleave(lowerCamelCase , dim=0 )
        __snake_case : Union[str, Any] = randn_tensor(init_latents.shape , generator=lowerCamelCase , device=lowerCamelCase , dtype=lowerCamelCase )
        # get latents
        __snake_case : List[Any] = self.scheduler.add_noise(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        __snake_case : Tuple = init_latents
        return latents
    # get_image_description: caption an image with the CoCa model; strips
    # the CoCa start/end sentinels from the decoded text.
    def __snake_case ( self : Optional[int] , lowerCamelCase : Dict ) -> List[Any]:
        __snake_case : Any = self.coca_transform(lowerCamelCase ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            __snake_case : Union[str, Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        __snake_case : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
    # get_clip_image_embeddings: run the CLIP feature extractor + image
    # encoder, L2-normalize, and duplicate per prompt.
    def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : List[Any] ) -> Union[str, Any]:
        __snake_case : Optional[Any] = self.feature_extractor.preprocess(lowerCamelCase )
        __snake_case : Optional[int] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        __snake_case : Tuple = self.clip_model.get_image_features(lowerCamelCase )
        __snake_case : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
        __snake_case : List[str] = image_embeddings_clip.repeat_interleave(lowerCamelCase , dim=0 )
        return image_embeddings_clip
    # cond_fn: CLIP-guidance step — predicts x0 from the noise prediction,
    # decodes it, scores it against the target CLIP embedding with
    # spherical_dist_loss, and nudges the noise prediction/latents along the
    # negative gradient.  Gradients are enabled despite the sampling loop's
    # no_grad context.
    @torch.enable_grad()
    def __snake_case ( self : str , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int , ) -> List[str]:
        __snake_case : Optional[Any] = latents.detach().requires_grad_()
        __snake_case : Dict = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
        # predict the noise residual
        __snake_case : Optional[Any] = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            __snake_case : Tuple = self.scheduler.alphas_cumprod[timestep]
            __snake_case : Dict = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            __snake_case : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            __snake_case : Union[str, Any] = torch.sqrt(lowerCamelCase )
            __snake_case : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , lowerCamelCase ):
            # LMS-style scheduler: x0 estimate via sigma.
            __snake_case : Any = self.scheduler.sigmas[index]
            __snake_case : Any = latents - sigma * noise_pred
        else:
            raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        __snake_case : Tuple = 1 / 0.1_82_15 * sample
        __snake_case : int = self.vae.decode(lowerCamelCase ).sample
        __snake_case : str = (image / 2 + 0.5).clamp(0 , 1 )
        __snake_case : str = transforms.Resize(self.feature_extractor_size )(lowerCamelCase )
        __snake_case : List[str] = self.normalize(lowerCamelCase ).to(latents.dtype )
        __snake_case : List[str] = self.clip_model.get_image_features(lowerCamelCase )
        __snake_case : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCamelCase )
        __snake_case : str = spherical_dist_loss(lowerCamelCase , lowerCamelCase ).mean() * clip_guidance_scale
        __snake_case : Union[str, Any] = -torch.autograd.grad(lowerCamelCase , lowerCamelCase )[0]
        if isinstance(self.scheduler , lowerCamelCase ):
            __snake_case : str = latents.detach() + grads * (sigma**2)
            __snake_case : List[Any] = noise_pred_original
        else:
            __snake_case : str = noise_pred_original - torch.sqrt(lowerCamelCase ) * grads
        return noise_pred, latents
    # __call__: full sampling loop — captions missing prompts with CoCa,
    # slerps text embeddings, latents, and CLIP image embeddings between the
    # content and style images, then denoises with classifier-free guidance
    # and optional CLIP guidance, finally VAE-decoding to images.
    @torch.no_grad()
    def __call__( self : List[str] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[int] = 512 , lowerCamelCase : Optional[int] = 512 , lowerCamelCase : float = 0.6 , lowerCamelCase : Optional[int] = 50 , lowerCamelCase : Optional[float] = 7.5 , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[float] = 100 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : float = 0.8 , lowerCamelCase : float = 0.1 , lowerCamelCase : float = 0.1 , ) -> Any:
        """Run the images-mixing generation; returns a StableDiffusionPipelineOutput
        (or a tuple when return_dict is falsy)."""
        # Validate generator count, dimensions, and expand a single generator
        # into a per-sample list.
        if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) != batch_size:
            raise ValueError(F'You have passed {batch_size} batch_size, but only {len(lowerCamelCase )} generators.' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
        if isinstance(lowerCamelCase , torch.Generator ) and batch_size > 1:
            __snake_case : int = [generator] + [None] * (batch_size - 1)
        __snake_case : Tuple = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        __snake_case : Dict = [x[0] for x in coca_is_none if x[1]]
        __snake_case : List[Any] = ", ".join(lowerCamelCase )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(lowerCamelCase ):
                raise ValueError(
                    F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            __snake_case : List[Any] = self.get_image_description(lowerCamelCase )
        if style_prompt is None:
            if len(lowerCamelCase ):
                raise ValueError(
                    F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            __snake_case : Union[str, Any] = self.get_image_description(lowerCamelCase )
        # get prompt text embeddings for content and style
        __snake_case : List[Any] = self.tokenizer(
            lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors="pt" , )
        __snake_case : Union[str, Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        __snake_case : Tuple = self.tokenizer(
            lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCamelCase , return_tensors="pt" , )
        __snake_case : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # Mix content and style text embeddings via slerp.
        __snake_case : Optional[Any] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        # duplicate text embeddings for each generation per prompt
        __snake_case : str = text_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
        # set timesteps
        __snake_case : Tuple = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        __snake_case : str = {}
        if accepts_offset:
            __snake_case : Optional[Any] = 1
        self.scheduler.set_timesteps(lowerCamelCase , **lowerCamelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        __snake_case , __snake_case : str = self.get_timesteps(lowerCamelCase , lowerCamelCase , self.device )
        __snake_case : Dict = timesteps[:1].repeat(lowerCamelCase )
        # Preprocess image
        __snake_case : Dict = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        __snake_case : List[Any] = self.prepare_latents(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
        __snake_case : Dict = preprocess(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        __snake_case : Dict = self.prepare_latents(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , text_embeddings.dtype , self.device , lowerCamelCase )
        # Mix the content and style latents via slerp.
        __snake_case : Optional[int] = slerp(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        if clip_guidance_scale > 0:
            # Mixed CLIP image embedding used as the guidance target.
            __snake_case : List[str] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
            __snake_case : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase , lowerCamelCase )
            __snake_case : str = slerp(
                lowerCamelCase , lowerCamelCase , lowerCamelCase )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        __snake_case : str = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            __snake_case : Any = content_text_input.input_ids.shape[-1]
            __snake_case : Tuple = self.tokenizer([""] , padding="max_length" , max_length=lowerCamelCase , return_tensors="pt" )
            __snake_case : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            __snake_case : Dict = uncond_embeddings.repeat_interleave(lowerCamelCase , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __snake_case : str = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        __snake_case : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        __snake_case : Tuple = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                __snake_case : Union[str, Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
                    self.device )
            else:
                __snake_case : Optional[Any] = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            __snake_case : int = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        __snake_case : List[str] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __snake_case : Tuple = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __snake_case : Optional[int] = {}
        if accepts_eta:
            __snake_case : List[str] = eta
        # check if the scheduler accepts generator
        __snake_case : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            __snake_case : Any = generator
        with self.progress_bar(total=lowerCamelCase ):
            for i, t in enumerate(lowerCamelCase ):
                # expand the latents if we are doing classifier free guidance
                __snake_case : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                __snake_case : Union[str, Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
                # predict the noise residual
                __snake_case : List[Any] = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    __snake_case , __snake_case : Optional[Any] = noise_pred.chunk(2 )
                    __snake_case : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    __snake_case : Dict = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    __snake_case , __snake_case : Tuple = self.cond_fn(
                        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
                # compute the previous noisy sample x_t -> x_t-1
                __snake_case : Tuple = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        __snake_case : int = 1 / 0.1_82_15 * latents
        __snake_case : Optional[int] = self.vae.decode(lowerCamelCase ).sample
        __snake_case : int = (image / 2 + 0.5).clamp(0 , 1 )
        __snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __snake_case : str = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
| 81 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Configuration holder for the ConditionalDetr image-processor tests.

    NOTE(review): restored from machine-mangled source in which every
    ``__init__`` parameter shared the name ``lowerCamelCase`` (duplicate
    arguments are a SyntaxError) and attribute assignments were rebound to a
    throwaway local.  The class and method names are grounded by the call
    sites in this file (``ConditionalDetrImageProcessingTester(self)``,
    ``prepare_image_processor_dict``, ``get_expected_values``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to output.

        Non-batched: shortest-edge resize of the first image.  Batched: the
        per-image expected sizes, padded to the max height/width in the batch.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class a(_lowerCAmelCase, unittest.TestCase):
    """Tests for `ConditionalDetrImageProcessor`: attribute presence, `from_dict`
    kwarg overrides, PIL/numpy/torch batching behaviour, and COCO detection /
    panoptic annotation encoding.

    De-obfuscation fixes: test methods were named `__snake_case` (unittest would
    never discover them), locals/attributes were assigned to `__snake_case` while
    later lines referenced the real names, and literal arguments were replaced by
    the placeholder `lowerCamelCase`.
    """

    # Class under test; None when the vision dependencies are unavailable.
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        # kwargs passed to from_dict must override the serialized values
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 81 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Fallback stub used when PIL is unavailable, so module-level references
        to `Image.open` still resolve (the original stub was named `a` with a
        method `__snake_case`, which made it unusable as a PIL stand-in)."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    """Return the first 10 hex chars of the MD5 digest of *image*'s raw bytes.

    Fixes: `hashlib.mda` -> `hashlib.md5`; the digest object is bound to the
    name that is actually used (`m`); renamed from `lowerCAmelCase_` to
    `hashimage`, the name used at the call sites (and previously shadowed by
    the next `lowerCAmelCase_` definition).
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask):
    """Summarize a mask image as a small comparable dict: short content hash + array shape.

    Renamed from `lowerCAmelCase_` to the name used at the call sites
    (`mask_to_test_readable`); locals restored so `npimg`/`shape` are defined.
    """
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class a(unittest.TestCase):
    """Pipeline tests for `mask-generation` (SAM-style automatic mask generation).

    De-obfuscation fixes: the harness-required names (`model_mapping`,
    `tf_model_mapping`, `get_test_pipeline`, `run_pipeline_test`) and the
    `test_*` method names were obfuscated; `get_test_pipeline` /
    `run_pipeline_test` had duplicate `lowerCamelCase` parameters (SyntaxError);
    locals (`image_segmenter`, `outputs`, the accumulator list) were assigned
    to `__snake_case` but referenced under their real names.
    """

    # Mappings consumed by the generic pipeline test harness; empty when the
    # corresponding backend is unavailable.
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline plus example inputs for the generic pipeline test suite."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Outputs are too large and model-specific to check generically.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ], )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ],
        )
| 81 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# Both module constants were obfuscated to `_snake_case` (the second assignment
# silently clobbered the first); `_CONFIG_FOR_DOC` is referenced by the
# `@replace_return_docstrings` decorator further down.
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """Conv block bundling convolution, batch norm and ReLU.

    Restored from the degraded source: the class was named `a` although the file
    instantiates `UperNetConvModule`; `nn.Convad`/`nn.BatchNormad` were typos for
    `nn.Conv2d`/`nn.BatchNorm2d`; the submodules were never bound to the
    `self.conv`/`self.batch_norm`/`self.activation` names that `forward` reads;
    and `forward` itself was misnamed, breaking `nn.Module.__call__`.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    """One branch of the pyramid pooling module: adaptive average pool to
    `pool_scale`, then a 1x1 conv module.

    Restored: class name (`UperNetPyramidPoolingBlock` is what the file
    instantiates), `nn.AdaptiveAvgPoolad` -> `nn.AdaptiveAvgPool2d`, the
    `self.layers` binding that `forward` iterates, and the `forward` name.
    """

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each layer as a submodule so parameters are tracked.
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSPNet-style): pools the input at several scales
    and upsamples each pooled feature back to the input resolution.

    Restored: class name (instantiated as `UperNetPyramidPoolingModule` by the
    head), the attribute bindings that `forward` reads, and the `forward` name.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            # Upsample each pooled map back to the spatial size of `x`.
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head (PSP module + FPN) producing
    per-pixel class logits.

    Restored from the degraded source: class name (`UperNetHead` is what the
    model instantiates), all the `self.*` attribute bindings that the other
    methods read, and the `forward`/`init_weights` method names.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        """Run the PSP module on the top (coarsest) feature map."""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully-convolutional auxiliary head applied to one backbone level
    (`in_index`) during training.

    Restored: class name (instantiated as `UperNetFCNHead` by the model), the
    `self.*` attribute bindings read by `forward`, and the method names.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight init and pretrained loading for UperNet models.

    Restored: class name and base (`PreTrainedModel` is imported above and
    `_lowerCAmelCase` resolves to nothing), the conventional class-attribute
    names consumed by `PreTrainedModel`, and the obfuscated `isinstance` target
    in `_init_weights`.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights of backbone and heads."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
# Restored constant names: the second `_snake_case` assignment clobbered the
# first, and the forward decorator below references `UPERNET_INPUTS_DOCSTRING`.
UPERNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    """UperNet semantic-segmentation model: a backbone plus the UperNet decode
    head and an optional FCN auxiliary head.

    Restored: class/base names, the `self.*` attribute bindings that `forward`
    reads, the `forward` method name, the obfuscated `output_type` in
    `replace_return_docstrings`, and the local names in `forward`.
    """

    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps
        logits = self.decode_head(features)
        # Upsample logits to the input resolution.
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 81 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any ) -> Union[str, Any]:
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Union[str, Any] = BlipImageProcessor()
__snake_case : Union[str, Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__snake_case : Optional[int] = BlipaProcessor(lowerCamelCase , lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[Any] ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).tokenizer
def __snake_case ( self : str , **lowerCamelCase : List[str] ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor
def __snake_case ( self : Dict ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Dict ) -> int:
__snake_case : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Optional[int] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : List[Any] ) -> Any:
__snake_case : Optional[int] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__snake_case : List[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __snake_case ( self : int ) -> Optional[Any]:
__snake_case : Dict = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Dict = BlipaProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : Optional[int] = self.prepare_image_inputs()
__snake_case : Any = image_processor(lowerCamelCase , return_tensors="np" )
__snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer(self):
    """processor(text=...) must match calling the tokenizer directly.

    NOTE(review): restored names — locals were bound to the mangled
    ``__snake_case`` while the loop referenced ``encoded_tok`` /
    ``encoded_processor``. ``return_token_type_ids=False`` reconstructs the
    upstream test (the processor drops token_type_ids) — confirm against the
    original transformers test file.
    """
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = "lower newer"
    encoded_processor = processor(text=input_str)
    encoded_tok = tokenizer(input_str, return_token_type_ids=False)
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
    """Joint text+image call exposes the expected keys; an empty call raises.

    NOTE(review): restored names; ``pytest.raises(ValueError)`` reconstructs the
    upstream test (the mangled source passed an undefined name).
    """
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = "lower newer"
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)
    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
    # test if it raises when no input is passed
    with pytest.raises(ValueError):
        processor()
def test_tokenizer_decode(self):
    """batch_decode is forwarded to the tokenizer unchanged.

    NOTE(review): restored names — the id list and both decode results were all
    bound to mangled names while the assertion referenced the real ones.
    """
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)
    self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
    """A joint text+image call returns exactly the keys the model consumes.

    NOTE(review): restored names — locals were bound to mangled names while
    the assertion referenced ``inputs``.
    """
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = "lower newer"
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)
    # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
    self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 81 |
from __future__ import annotations
def lowerCAmelCase_(__lowerCamelCase):
    """Return the maximum sum of non-adjacent elements ("house robber").

    Fixes: the original body read ``nums`` although the parameter was renamed,
    collapsed both DP results into a single variable, and finally returned
    ``max(param, param)`` — a NameError / wrong result. Doctests added because
    the script's main guard already runs ``doctest.testmod()``.

    >>> lowerCAmelCase_([1, 2, 3])
    4
    >>> lowerCAmelCase_([2, 7, 9, 3, 1])
    12
    >>> lowerCAmelCase_([])
    0
    """
    if not __lowerCamelCase:
        return 0
    max_including = __lowerCamelCase[0]  # best sum that uses the current element
    max_excluding = 0                    # best sum that skips the current element
    for num in __lowerCamelCase[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 81 | 1 |
# Bootstrap cell injected at the top of auto-generated documentation notebooks.
_snake_case : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# NOTE(review): this reads INSTALL_CONTENT, but the constant above was renamed
# to `_snake_case` by the obfuscation — INSTALL_CONTENT is undefined here.
_snake_case : Tuple = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> fake-class substitutions applied when executing doc examples.
_snake_case : Dict = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 81 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence):
    """Print every subsequence of *sequence* via backtracking.

    NOTE(review): restored — both functions were renamed to the same mangled
    identifier while their bodies and the main guard still referenced
    ``create_state_space_tree`` / ``generate_all_subsequences`` /
    ``current_subsequence`` (NameError at runtime).
    """
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence, current_subsequence, index):
    """Explore exclude/include choices at *index*; print each complete subset."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take sequence[index], then undo for the caller (backtracking).
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
# Demo driver: print all subsequences of an int list, then of a str list.
# Fix: the list was bound to the mangled `_snake_case` while every subsequent
# statement used `seq` (NameError at runtime).
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 81 | 1 |
def lowerCAmelCase_(a, b):
    """Return the bitwise XOR of two non-negative ints as a binary string.

    The result is zero-padded to the width of the longer operand and prefixed
    with ``"0b"``. Fix: the original signature declared two parameters with the
    same name (a SyntaxError) while the body used ``a`` and ``b``.

    >>> lowerCAmelCase_(25, 32)
    '0b111001'
    >>> lowerCAmelCase_(0, 0)
    '0b0'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = bin(a)[2:]  # remove the leading "0b"
    b_binary = bin(b)[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 81 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase_(__lowerCamelCase):
    """Preprocess a PIL image into a [-1, 1] NCHW float tensor for the pipeline.

    Steps: round the size down to a multiple of 32, Lanczos-resize, scale to
    [0, 1], reorder HWC->NCHW, then map to [-1, 1]. Fixes: the original bound
    the unpacked size to mangled names while using ``w``/``h`` (NameError), and
    used the non-existent ``np.floataa`` (intended ``np.float32``).
    """
    w, h = __lowerCamelCase.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = __lowerCamelCase.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a (_lowerCAmelCase ):
    """Latent-diffusion super-resolution style pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): heavily damaged by obfuscation and left byte-identical here —
    ``__init__`` declares three parameters all named ``lowerCamelCase`` (a
    SyntaxError), and ``__call__`` binds every local to ``__snake_case`` while
    later statements read ``image``, ``batch_size``, ``latents``,
    ``accepts_eta``, ``output_type`` etc. (undefined names). Restore from the
    upstream diffusers pipeline before use.
    """
    def __init__( self : Tuple , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> Union[str, Any]:
        # Registers vqvae/unet/scheduler as pipeline sub-modules.
        super().__init__()
        self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
    @torch.no_grad()
    def __call__( self : List[str] , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        # Accept either a PIL image (batch of 1) or a batched tensor.
        if isinstance(lowerCamelCase , PIL.Image.Image ):
            __snake_case : Any = 1
        elif isinstance(lowerCamelCase , torch.Tensor ):
            __snake_case : Any = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}' )
        if isinstance(lowerCamelCase , PIL.Image.Image ):
            __snake_case : List[Any] = preprocess(lowerCamelCase )
        __snake_case , __snake_case : int = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        __snake_case : str = (batch_size, self.unet.config.in_channels // 2, height, width)
        __snake_case : str = next(self.unet.parameters() ).dtype
        __snake_case : Tuple = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
        __snake_case : List[Any] = image.to(device=self.device , dtype=lowerCamelCase )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
        __snake_case : str = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        __snake_case : Dict = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __snake_case : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __snake_case : int = {}
        if accepts_eta:
            __snake_case : List[str] = eta
        for t in self.progress_bar(lowerCamelCase ):
            # concat latents and low resolution image in the channel dimension.
            __snake_case : Union[str, Any] = torch.cat([latents, image] , dim=1 )
            __snake_case : Optional[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
            # predict the noise residual
            __snake_case : int = self.unet(lowerCamelCase , lowerCamelCase ).sample
            # compute the previous noisy sample x_t -> x_t-1
            __snake_case : Union[str, Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
        # decode the image latents with the VQVAE
        __snake_case : List[Any] = self.vqvae.decode(lowerCamelCase ).sample
        __snake_case : Dict = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
        # Map from [-1, 1] back to [0, 1], then to NHWC numpy for PIL conversion.
        __snake_case : Any = image / 2 + 0.5
        __snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __snake_case : Tuple = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCamelCase )
| 81 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module-level logger for this configuration file.
_snake_case : Union[str, Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
# NOTE(review): rebinds the same mangled name as the logger above, shadowing it —
# the two constants clearly had distinct names before obfuscation.
_snake_case : Union[str, Any] = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class a (_lowerCAmelCase ):
    """Configuration for Marian encoder-decoder translation models.

    Restored from the mangled original, in which every ``__init__`` parameter
    shared the name ``lowerCamelCase`` (SyntaxError: duplicate argument) and all
    three class attributes were bound to a single identifier. Parameter names
    are recovered from the defaults and the assignment order in the body, which
    match ``transformers.MarianConfig``.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Fall back to the encoder vocab when no separate decoder vocab is given.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class a (_lowerCAmelCase ):
    """ONNX export configuration for Marian (copied from BartOnnxConfig).

    NOTE(review): obfuscation-damaged and left byte-identical — every dummy-input
    method declares several parameters all named ``lowerCamelCase`` (SyntaxError:
    duplicate argument), and locals are bound to ``__snake_case`` while later
    statements read the intended names (``common_inputs``, ``decoder_inputs``,
    ``batch``, ``seqlen``, ...). The original method names were ``inputs``,
    ``outputs``, ``_generate_dummy_inputs_for_default_and_seq2seq_lm``,
    ``_generate_dummy_inputs_for_causal_lm``,
    ``_generate_dummy_inputs_for_encoder_and_decoder``,
    ``generate_dummy_inputs``, ``_flatten_past_key_values_`` and
    ``atol_for_validation``. Restore from upstream transformers before use.
    """
    # Property: dynamic-axis spec of the exported model's inputs, per task.
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def __snake_case ( self : str ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case : Optional[Any] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                __snake_case : Optional[int] = {0: "batch"}
                __snake_case : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                __snake_case : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
                __snake_case : List[Any] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __snake_case : Dict = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                __snake_case , __snake_case : Dict = self.num_layers
                for i in range(lowerCamelCase ):
                    __snake_case : List[str] = {0: "batch", 2: "past_sequence + sequence"}
                    __snake_case : Any = {0: "batch", 2: "past_sequence + sequence"}
        else:
            __snake_case : Dict = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
    # Property: dynamic-axis spec of the exported model's outputs.
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def __snake_case ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case : Dict = super().outputs
        else:
            __snake_case : Optional[int] = super(lowerCamelCase , self ).outputs
            if self.use_past:
                __snake_case , __snake_case : Dict = self.num_layers
                for i in range(lowerCamelCase ):
                    __snake_case : Dict = {0: "batch", 2: "past_sequence + sequence"}
                    __snake_case : List[str] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    # Dummy inputs for default / seq2seq-lm export (encoder + decoder + pasts).
    def __snake_case ( self : Any , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        __snake_case : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        # Generate decoder inputs
        __snake_case : Any = seq_length if not self.use_past else 1
        __snake_case : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        __snake_case : Optional[int] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        __snake_case : int = dict(**lowerCamelCase , **lowerCamelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                __snake_case , __snake_case : Any = common_inputs["input_ids"].shape
                __snake_case : Tuple = common_inputs["decoder_input_ids"].shape[1]
                __snake_case , __snake_case : Optional[Any] = self.num_attention_heads
                __snake_case : List[Any] = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                __snake_case : int = decoder_seq_length + 3
                __snake_case : List[str] = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                __snake_case : Optional[int] = torch.cat(
                    [common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
                __snake_case : Optional[Any] = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                __snake_case , __snake_case : str = self.num_layers
                __snake_case : Any = min(lowerCamelCase , lowerCamelCase )
                __snake_case : Optional[Any] = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
                __snake_case : List[str] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
                for _ in range(lowerCamelCase ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(lowerCamelCase ),
                            torch.zeros(lowerCamelCase ),
                            torch.zeros(lowerCamelCase ),
                            torch.zeros(lowerCamelCase ),
                        ) )
                # TODO: test this.
                __snake_case : List[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
                for _ in range(lowerCamelCase , lowerCamelCase ):
                    common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
        return common_inputs
    # Dummy inputs for causal-lm export (decoder-only pasts).
    def __snake_case ( self : int , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        __snake_case : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                __snake_case , __snake_case : int = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                __snake_case : List[Any] = seqlen + 2
                __snake_case , __snake_case : List[Any] = self.num_layers
                __snake_case , __snake_case : Union[str, Any] = self.num_attention_heads
                __snake_case : Optional[Any] = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                __snake_case : Any = common_inputs["attention_mask"].dtype
                __snake_case : Optional[int] = torch.cat(
                    [common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
                __snake_case : Tuple = [
                    (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
                ]
        return common_inputs
    # Tokenize dummy text with fixed batch/sequence dims to defeat ONNX folding.
    def __snake_case ( self : Optional[int] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        __snake_case : Optional[int] = compute_effective_axis_dimension(
            lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __snake_case : List[Any] = tokenizer.num_special_tokens_to_add(lowerCamelCase )
        __snake_case : Optional[int] = compute_effective_axis_dimension(
            lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
        # Generate dummy inputs according to compute batch and sequence
        __snake_case : List[Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        __snake_case : List[str] = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
        return common_inputs
    # Task dispatcher for dummy-input generation.
    def __snake_case ( self : List[str] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
        else:
            __snake_case : str = self._generate_dummy_inputs_for_causal_lm(
                lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
        return common_inputs
    # Flatten past_key_values entries for the chosen task.
    def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str ) -> List[Any]:
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case : List[Any] = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
        else:
            __snake_case : Tuple = super(lowerCamelCase , self )._flatten_past_key_values_(
                lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # Absolute tolerance used when validating the exported model.
    @property
    def __snake_case ( self : List[str] ) -> float:
        return 1E-4
| 81 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Force deterministic torch/cuda kernels so golden-slice comparisons are stable.
enable_full_determinism()
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Unit tests for the AutoencoderKL model (ModelTesterMixin + UNetTesterMixin).

    NOTE(review): obfuscation-damaged and left byte-identical — class attributes
    all share one mangled name (only the last binding survives), and method
    locals are bound to ``__snake_case`` while later lines read the intended
    names (``batch_size``, ``model``, ``out``, ``loading_info``, ...). Original
    method names were ``dummy_input``, ``input_shape``, ``output_shape``,
    ``prepare_init_args_and_inputs_for_common``, the two skipped stubs, the
    gradient-checkpointing test, ``test_from_pretrained_hub`` and
    ``test_output_pretrained``. Restore from the upstream diffusers test file.
    """
    __UpperCAmelCase : str = AutoencoderKL
    __UpperCAmelCase : Optional[Any] = "sample"
    __UpperCAmelCase : Optional[int] = 1e-2
    # Property: a random (4, 3, 32, 32) sample dict for the model under test.
    @property
    def __snake_case ( self : Dict ) -> Optional[Any]:
        __snake_case : Optional[Any] = 4
        __snake_case : Tuple = 3
        __snake_case : List[str] = (32, 32)
        __snake_case : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
        return {"sample": image}
    # Property: expected input shape (C, H, W).
    @property
    def __snake_case ( self : Union[str, Any] ) -> Tuple:
        return (3, 32, 32)
    # Property: expected output shape (C, H, W).
    @property
    def __snake_case ( self : int ) -> int:
        return (3, 32, 32)
    # Init kwargs + inputs used by the common model-tester machinery.
    def __snake_case ( self : Optional[Any] ) -> Dict:
        __snake_case : Optional[Any] = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        __snake_case : Any = self.dummy_input
        return init_dict, inputs_dict
    # Intentionally skipped common tests (no-ops).
    def __snake_case ( self : str ) -> Dict:
        pass
    def __snake_case ( self : Tuple ) -> List[str]:
        pass
    # Gradient checkpointing must not change losses or parameter gradients.
    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def __snake_case ( self : Any ) -> Optional[Any]:
        # enable deterministic behavior for gradient checkpointing
        __snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
        __snake_case : str = self.model_class(**lowerCamelCase )
        model.to(lowerCamelCase )
        assert not model.is_gradient_checkpointing and model.training
        __snake_case : str = model(**lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        __snake_case : Any = torch.randn_like(lowerCamelCase )
        __snake_case : str = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        __snake_case : Optional[int] = self.model_class(**lowerCamelCase )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(lowerCamelCase )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        __snake_case : int = model_a(**lowerCamelCase ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        __snake_case : Union[str, Any] = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        __snake_case : Optional[int] = dict(model.named_parameters() )
        __snake_case : List[Any] = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    # Loading a hub checkpoint reports no missing keys and produces output.
    def __snake_case ( self : List[Any] ) -> Optional[int]:
        __snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase )
        self.assertIsNotNone(lowerCamelCase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowerCamelCase )
        __snake_case : Optional[Any] = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    # Seeded forward pass matches per-device golden output slices.
    def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        __snake_case : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        __snake_case : Dict = model.to(lowerCamelCase )
        model.eval()
        if torch_device == "mps":
            __snake_case : int = torch.manual_seed(0 )
        else:
            __snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
        __snake_case : List[str] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        __snake_case : Union[str, Any] = image.to(lowerCamelCase )
        with torch.no_grad():
            __snake_case : str = model(lowerCamelCase , sample_posterior=lowerCamelCase , generator=lowerCamelCase ).sample
        __snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            __snake_case : Union[str, Any] = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            __snake_case : Tuple = torch.tensor(
                [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
        else:
            __snake_case : List[str] = torch.tensor(
                [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
        self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
@slow
class a (unittest.TestCase ):
"""simple docstring"""
def get_file_format(self, seed, shape):
    """Filename of the cached gaussian-noise fixture for a given seed/shape.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and stringified the wrong variable inside the join; callers
    use ``self.get_file_format``, so the mangled def name is restored too.
    """
    return F'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'
def tearDown(self):
    """Release VRAM between tests (unittest hook).

    NOTE(review): restored from the mangled ``__snake_case`` — unittest only
    invokes the hook when it is literally named ``tearDown``, so GPU memory was
    never reclaimed between tests.
    """
    # clean up the VRAM after each test
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
    """Load a deterministic noise-image fixture from the hub, on-device.

    Fixes: three parameters shared one mangled name (SyntaxError), and
    ``torch.floataa`` exists for neither precision — intended
    float16 (when ``fpaa``) / float32. Callers use ``self.get_sd_image``.
    """
    dtype = torch.float16 if fpaa else torch.float32
    image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
    return image
def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
    """Load the Stable Diffusion VAE submodel, optionally in fp16.

    Fixes: both parameters shared one mangled name (SyntaxError),
    ``torch.floataa`` is not a dtype, and the model was moved to the seed
    value rather than ``torch_device``. Callers use ``self.get_sd_vae_model``.
    """
    revision = "fp16" if fpaa else None
    torch_dtype = torch.float16 if fpaa else torch.float32
    model = AutoencoderKL.from_pretrained(
        model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
    model.to(torch_device).eval()
    return model
def get_generator(self, lowerCamelCase=0):
    """Return a torch RNG seeded with ``lowerCamelCase`` on the active device.

    Fix: the non-MPS branch constructed ``torch.Generator(device=seed)`` —
    passing the integer seed as the device — instead of ``torch_device``.
    Callers use ``self.get_generator``, so the mangled def name is restored.
    """
    if torch_device == "mps":
        # MPS has no Generator support; seed the global RNG instead.
        return torch.manual_seed(lowerCamelCase)
    return torch.Generator(device=torch_device).manual_seed(lowerCamelCase)
@parameterized.expand(
    [
        # fmt: off
        [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
        [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
        # fmt: on
    ] )
def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
    """Seeded posterior sampling matches per-device golden slices.

    NOTE(review): restored names — locals were bound to mangled identifiers
    while the assertions referenced ``sample``/``output_slice``;
    ``sample_posterior=True`` reconstructs the upstream test.
    """
    model = self.get_sd_vae_model()
    image = self.get_sd_image(seed)
    generator = self.get_generator(seed)
    with torch.no_grad():
        sample = model(image, generator=generator, sample_posterior=True).sample
    assert sample.shape == image.shape
    output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
    assert torch_all_close(output_slice, expected_output_slice, atol=3E-3)
@parameterized.expand(
    [
        # fmt: off
        [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
        [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
        # fmt: on
    ] )
@require_torch_gpu
def test_stable_diffusion_fp16(self, seed, expected_slice):
    """fp16 seeded posterior sampling matches golden slices (GPU only).

    NOTE(review): restored names; ``fpaa=True`` / ``sample_posterior=True``
    reconstruct the upstream test (the mangled source passed undefined names).
    """
    model = self.get_sd_vae_model(fpaa=True)
    image = self.get_sd_image(seed, fpaa=True)
    generator = self.get_generator(seed)
    with torch.no_grad():
        sample = model(image, generator=generator, sample_posterior=True).sample
    assert sample.shape == image.shape
    output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)
    assert torch_all_close(output_slice, expected_output_slice, atol=1E-2)
@parameterized.expand(
    [
        # fmt: off
        [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
        [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
        # fmt: on
    ] )
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
    """Deterministic (mode) forward pass matches per-device golden slices.

    NOTE(review): restored names — locals were bound to mangled identifiers
    while the assertions referenced the real ones.
    """
    model = self.get_sd_vae_model()
    image = self.get_sd_image(seed)
    with torch.no_grad():
        sample = model(image).sample
    assert sample.shape == image.shape
    output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
    assert torch_all_close(output_slice, expected_output_slice, atol=3E-3)
@parameterized.expand(
    [
        # fmt: off
        [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
        [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
        # fmt: on
    ] )
@require_torch_gpu
def test_stable_diffusion_decode(self, seed, expected_slice):
    """Decoding a latent fixture matches golden output slices (GPU only).

    NOTE(review): restored names — locals were bound to mangled identifiers
    while the assertions referenced ``sample``/``output_slice``.
    """
    model = self.get_sd_vae_model()
    encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
    with torch.no_grad():
        sample = model.decode(encoding).sample
    assert list(sample.shape) == [3, 3, 512, 512]
    output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
    expected_output_slice = torch.tensor(expected_slice)
    assert torch_all_close(output_slice, expected_output_slice, atol=1E-3)
@parameterized.expand(
    [
        # fmt: off
        [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
        [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
        # fmt: on
    ] )
@require_torch_gpu
def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
    """fp16 decoding of a latent fixture matches golden slices (GPU only).

    NOTE(review): restored names; ``fpaa=True`` reconstructs the upstream test
    (the mangled source passed undefined names).
    """
    model = self.get_sd_vae_model(fpaa=True)
    encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
    with torch.no_grad():
        sample = model.decode(encoding).sample
    assert list(sample.shape) == [3, 3, 512, 512]
    output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
    expected_output_slice = torch.tensor(expected_slice)
    assert torch_all_close(output_slice, expected_output_slice, atol=5E-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
    """fp16 decode with xformers attention agrees with the default attention.

    NOTE(review): restored names — both decode results were bound to mangled
    identifiers while the assertions compared ``sample``/``sample_a``.
    """
    model = self.get_sd_vae_model(fpaa=True)
    encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
    with torch.no_grad():
        sample = model.decode(encoding).sample
    model.enable_xformers_memory_efficient_attention()
    with torch.no_grad():
        sample_a = model.decode(encoding).sample
    assert list(sample.shape) == [3, 3, 512, 512]
    assert torch_all_close(sample, sample_a, atol=1E-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
__snake_case : str = self.get_sd_vae_model()
__snake_case : Union[str, Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : List[Any] = model.decode(lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Dict = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
            [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
            # fmt: on
        ] )
    def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Optional[int]:
        """Encode a seeded image with the SD VAE, sample from the latent
        distribution with a seeded generator, and compare a fixed latent slice
        against the expected values. Tolerance is widened on the "mps" backend.
        """
        __snake_case : str = self.get_sd_vae_model()
        __snake_case : int = self.get_sd_image(lowerCamelCase )
        __snake_case : int = self.get_generator(lowerCamelCase )
        with torch.no_grad():
            __snake_case : Optional[Any] = model.encode(lowerCamelCase ).latent_dist
            __snake_case : Dict = dist.sample(generator=lowerCamelCase )
        # latents have 4 channels at 1/8 the spatial resolution of the image
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        __snake_case : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
        __snake_case : Dict = torch.tensor(lowerCamelCase )
        # mps accumulates more numerical error than cuda/cpu
        __snake_case : Dict = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=lowerCamelCase )
| 81 | 1 |
import argparse

import torch

from transformers import BertForMaskedLM


# Extract a 6-layer DistilBERT-shaped state dict from a full BertForMaskedLM
# checkpoint for transfer-learned distillation.
#
# BUGFIX: the previous version bound every intermediate value to the same
# throwaway name, so `prefix`, `compressed_sd` and `std_idx` were referenced
# but never defined (NameError at runtime). The assignments below restore the
# intended student (DistilBERT) key names for each copied teacher tensor.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied verbatim from the teacher.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Keep every other teacher layer (0, 2, 4, 7, 9, 11) as student layers 0..5.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # MLM head: decoder projection (tied to embeddings in BERT) and bias.
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 81 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_snake_case : Optional[int] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ) -> None:
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 81 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
# TODO Update this
_snake_case : Tuple = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class a (_lowerCAmelCase ):
    """Configuration class for ESM models, optionally carrying an ESMFold
    folding-model configuration.

    NOTE(review): in the previous version every ``__init__`` argument shared
    the single mangled name ``lowerCamelCase`` — duplicate parameter names are
    a SyntaxError, so the class could not even be imported. Parameter names are
    restored here from the defaults and the attribute assignments in the body.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding models get a nested EsmFoldConfig and a vocabulary list.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self):
        """Serialize to a plain dict, expanding a nested esmfold_config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class a :
    """ESMFold-specific configuration (the ESM backbone options and the nested
    trunk config).

    NOTE(review): in the previous version every field shared one mangled name,
    collapsing the dataclass to a single attribute, and the post-init hook was
    not named ``__post_init__`` so it never ran. Field names are restored here
    from the defaults and the ``self.trunk`` reads in the methods.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Promote a missing/raw-dict trunk (e.g. loaded from JSON) to TrunkConfig.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class a :
    """Configuration of the ESMFold trunk (the stack of folding blocks).

    NOTE(review): field names were mangled (every field shared one name,
    collapsing the dataclass to a single attribute while the validation below
    reads ``self.num_blocks``, ``self.sequence_state_dim``, ...). They are
    restored here from the defaults and the attribute reads.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """Fill in the nested structure-module config and validate dimensions."""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # BUGFIX: these checks previously computed `x % x != 0`, which is always
        # False, so they could never fire; divisibility must be checked against
        # the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure module."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class a :
    """Configuration of the ESMFold structure module (AlphaFold2-style
    invariant-point-attention head).

    NOTE(review): field names were mangled (every field shared one name,
    collapsing the dataclass to a single attribute); 15 distinct fields are
    restored here from the default values.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 vocabulary: four special prefix tokens, the 25
    residue/ambiguity codes, gap characters, and two special suffix tokens.

    BUGFIX: this function was defined under a mangled name while the config
    class above calls ``get_default_vocab_list()``; it is renamed to match the
    call site (the old name is kept as an alias for backward compatibility).
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )


# Backward-compatible alias for the previous (mangled) public name.
lowerCAmelCase_ = get_default_vocab_list
| 81 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """SamProcessor tests against the PyTorch backend: save/load round-trip,
    feature-extraction parity with the raw image processor, and
    ``post_process_masks`` shape/validation behavior.

    NOTE(review): locals in this file are name-mangled (all bound to
    ``__snake_case``), so several lines reference names that are never bound
    (``processor``, ``self.tmpdirname``, ...). Code is left byte-identical.
    """

    # setUp: save a fresh SamProcessor into a temp dir for the tests below.
    def __snake_case ( self : Tuple ) -> Optional[Any]:
        __snake_case : Dict = tempfile.mkdtemp()
        __snake_case : Any = SamImageProcessor()
        __snake_case : Optional[int] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    # Helper: reload the image processor from the temp dir, forwarding kwargs.
    def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    # tearDown: remove the temp dir.
    def __snake_case ( self : Optional[Any] ) -> Dict:
        shutil.rmtree(self.tmpdirname )

    # Helper: one random 30x400 RGB PIL image wrapped in a list.
    def __snake_case ( self : int ) -> List[Any]:
        __snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # save_pretrained/from_pretrained round-trip with extra kwargs.
    def __snake_case ( self : List[Any] ) -> Dict:
        __snake_case : int = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    # Processor output must match the underlying image processor's output.
    def __snake_case ( self : List[str] ) -> Tuple:
        __snake_case : int = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Optional[int] = self.prepare_image_inputs()
        __snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : Dict = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    # post_process_masks: accepts torch tensors, lists, and numpy arrays, and
    # rejects malformed size inputs.
    @require_torch
    def __snake_case ( self : Optional[Any] ) -> Dict:
        __snake_case : Tuple = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[str] = [torch.ones((1, 3, 5, 5) )]
        __snake_case : Tuple = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : int = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : List[str] = [np.ones((1, 3, 5, 5) )]
        __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : str = [[1, 0], [0, 1]]
        with self.assertRaises(lowerCamelCase ):
            __snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
@require_vision
@require_tf
class a (unittest.TestCase ):
    """TensorFlow counterpart of the SamProcessor tests above: save/load
    round-trip, parity with the raw image processor, and TF-tensor
    ``post_process_masks`` behavior.

    NOTE(review): locals are name-mangled (all bound to ``__snake_case``);
    code is left byte-identical.
    """

    # setUp: save a fresh SamProcessor into a temp dir.
    def __snake_case ( self : List[Any] ) -> Union[str, Any]:
        __snake_case : int = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    # Helper: reload the image processor from the temp dir, forwarding kwargs.
    def __snake_case ( self : str , **lowerCamelCase : Any ) -> Tuple:
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    # tearDown: remove the temp dir.
    def __snake_case ( self : Optional[int] ) -> Any:
        shutil.rmtree(self.tmpdirname )

    # Helper: one random 30x400 RGB PIL image wrapped in a list.
    def __snake_case ( self : str ) -> List[Any]:
        __snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # save_pretrained/from_pretrained round-trip with extra kwargs.
    def __snake_case ( self : int ) -> List[str]:
        __snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
        __snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase )

    # Processor output must match the underlying image processor's output.
    def __snake_case ( self : Union[str, Any] ) -> List[Any]:
        __snake_case : str = self.get_image_processor()
        __snake_case : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : int = image_processor(lowerCamelCase , return_tensors="np" )
        __snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    # post_process_masks with TF tensors and numpy arrays; malformed size
    # inputs must raise tf.errors.InvalidArgumentError.
    @require_tf
    def __snake_case ( self : Any ) -> Optional[int]:
        __snake_case : List[str] = self.get_image_processor()
        __snake_case : Dict = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        __snake_case : List[Any] = [[1764, 2646]]
        __snake_case : Dict = [[683, 1024]]
        __snake_case : List[str] = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Optional[Any] = processor.post_process_masks(
            lowerCamelCase , tf.convert_to_tensor(lowerCamelCase ) , tf.convert_to_tensor(lowerCamelCase ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
        __snake_case : List[str] = processor.post_process_masks(
            lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __snake_case : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __snake_case : Dict = processor.post_process_masks(
                lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class a (unittest.TestCase ):
    """PT/TF cross-framework equivalence tests for SamProcessor: the same
    inputs must produce numerically identical post-processed masks and pixel
    values under both backends.

    NOTE(review): locals are name-mangled (all bound to ``__snake_case``);
    code is left byte-identical.
    """

    # setUp: save a fresh SamProcessor into a temp dir.
    def __snake_case ( self : List[str] ) -> str:
        __snake_case : Optional[int] = tempfile.mkdtemp()
        __snake_case : str = SamImageProcessor()
        __snake_case : List[Any] = SamProcessor(lowerCamelCase )
        processor.save_pretrained(self.tmpdirname )

    # Helper: reload the image processor from the temp dir, forwarding kwargs.
    def __snake_case ( self : List[str] , **lowerCamelCase : Any ) -> Dict:
        return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor

    # tearDown: remove the temp dir.
    def __snake_case ( self : Optional[int] ) -> List[Any]:
        shutil.rmtree(self.tmpdirname )

    # Helper: one random 30x400 RGB PIL image wrapped in a list.
    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        __snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    # post_process_masks must agree between TF and PT on identical inputs.
    @is_pt_tf_cross_test
    def __snake_case ( self : Union[str, Any] ) -> List[str]:
        __snake_case : str = self.get_image_processor()
        __snake_case : str = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __snake_case : Dict = [tf.convert_to_tensor(lowerCamelCase )]
        __snake_case : List[Any] = [torch.tensor(lowerCamelCase )]
        __snake_case : Optional[Any] = [[1764, 2646]]
        __snake_case : Optional[int] = [[683, 1024]]
        __snake_case : Union[str, Any] = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
        __snake_case : Dict = processor.post_process_masks(
            lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    # Pixel values must agree between the raw image processor and the
    # processor, and between the PT and TF return types.
    @is_pt_tf_cross_test
    def __snake_case ( self : List[Any] ) -> List[str]:
        __snake_case : Any = self.get_image_processor()
        __snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
        __snake_case : Dict = self.prepare_image_inputs()
        __snake_case : Any = image_processor(lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Optional[Any] = processor(images=lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
        __snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        __snake_case : List[Any] = processor(images=lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
        self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
| 81 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a (_lowerCAmelCase ):
    """Config tester for SegformerConfig: checks that the config instantiated
    from the shared inputs exposes the SegFormer-specific attributes."""

    def __snake_case ( self : str ) -> Union[str, Any]:
        # Build a config from the tester's inputs and assert the expected
        # SegFormer attributes exist (mangled local: the config instance).
        __snake_case : Optional[Any] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(lowerCamelCase , "num_encoder_blocks" ) )
class a :
    """Model tester for SegFormer: builds small configs/inputs and runs
    create-and-check helpers for the base model and the segmentation heads.

    NOTE(review): this file is name-mangled — every ``__init__`` parameter
    shares the name ``lowerCamelCase`` (a SyntaxError) and locals are all bound
    to ``__snake_case``, so several lines reference names that are never bound
    (``parent``, ``config``, ``model``, ...). Code is left byte-identical;
    the attribute assignments document the intended parameter order.
    """

    def __init__( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any]=13 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : List[str]=3 , lowerCamelCase : Union[str, Any]=4 , lowerCamelCase : Optional[int]=[2, 2, 2, 2] , lowerCamelCase : List[str]=[8, 4, 2, 1] , lowerCamelCase : int=[16, 32, 64, 128] , lowerCamelCase : int=[1, 4, 8, 16] , lowerCamelCase : int=[1, 2, 4, 8] , lowerCamelCase : str=True , lowerCamelCase : List[Any]=True , lowerCamelCase : int="gelu" , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : List[Any]=None , ) -> Dict:
        __snake_case : Dict = parent
        __snake_case : int = batch_size
        __snake_case : int = image_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Tuple = num_encoder_blocks
        __snake_case : int = sr_ratios
        __snake_case : int = depths
        __snake_case : Optional[Any] = hidden_sizes
        __snake_case : Optional[int] = downsampling_rates
        __snake_case : Optional[int] = num_attention_heads
        __snake_case : Tuple = is_training
        __snake_case : Optional[Any] = use_labels
        __snake_case : Optional[int] = hidden_act
        __snake_case : Any = hidden_dropout_prob
        __snake_case : List[Any] = attention_probs_dropout_prob
        __snake_case : List[Any] = initializer_range
        __snake_case : int = num_labels
        __snake_case : List[str] = scope

    # Build (config, pixel_values, labels); labels only when use_labels is set.
    def __snake_case ( self : Dict ) -> Any:
        __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case : Dict = None
        if self.use_labels:
            __snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __snake_case : Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    # Materialize a SegformerConfig from the tester attributes.
    def __snake_case ( self : Optional[int] ) -> Optional[Any]:
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    # Base model: last hidden state is downsampled by downsampling_rates[-1]*2.
    def __snake_case ( self : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ) -> List[str]:
        __snake_case : Dict = SegformerModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : List[Any] = model(lowerCamelCase )
        __snake_case : Any = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    # Semantic segmentation head: logits come out at 1/4 the input resolution,
    # and supplying labels must yield a positive loss.
    def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int ) -> str:
        __snake_case : Any = self.num_labels
        __snake_case : List[str] = SegformerForSemanticSegmentation(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : List[str] = model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        __snake_case : Optional[int] = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    # Binary (single-label) segmentation: all-zero labels still give loss > 0.
    def __snake_case ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Any ) -> List[str]:
        __snake_case : Any = 1
        __snake_case : Any = SegformerForSemanticSegmentation(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : Tuple = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase )
        __snake_case : Any = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertGreater(result.loss , 0.0 )

    # Split prepared inputs into (config, inputs_dict) for the common tests.
    def __snake_case ( self : List[Any] ) -> Optional[int]:
        __snake_case : Optional[int] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
        __snake_case : int = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Any = False
    # setUp: build the model tester and config tester used by all tests below.
    # NOTE(review): SegformerModelTester / SegformerConfigTester are referenced
    # but the classes above were renamed by the obfuscation — verify names.
    def __snake_case ( self : str ) -> Dict:
        __snake_case : Dict = SegformerModelTester(self )
        __snake_case : Tuple = SegformerConfigTester(self , config_class=lowerCamelCase )

    # Run the shared config sanity checks.
    def __snake_case ( self : str ) -> int:
        self.config_tester.run_common_tests()

    # Base-model forward pass and output-shape check.
    def __snake_case ( self : int ) -> Any:
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )

    # Binary (single-label) segmentation check.
    def __snake_case ( self : Dict ) -> Any:
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase )

    # Multi-label semantic segmentation check.
    def __snake_case ( self : Tuple ) -> List[str]:
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase )
    # SegFormer consumes pixel_values only, so the common inputs_embeds test
    # does not apply.
    @unittest.skip("SegFormer does not use inputs_embeds" )
    def __snake_case ( self : Optional[Any] ) -> Dict:
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def __snake_case ( self : int ) -> Optional[int]:
        pass

    # Every model's forward signature must start with `pixel_values`.
    def __snake_case ( self : Optional[int] ) -> Optional[int]:
        __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[str] = model_class(lowerCamelCase )
            __snake_case : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Optional[Any] = [*signature.parameters.keys()]
            __snake_case : List[str] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    # Attention-outputs test: the number of returned attention maps equals
    # sum(depths); the first/last maps are checked against the efficient-attention
    # sequence reduction (sr_ratios) at 1/4 and 1/32 of the image resolution.
    # NOTE(review): locals are name-mangled; later lines read names that are
    # never bound (`model`, `outputs`, `attentions`, ...). Code left byte-identical.
    def __snake_case ( self : Optional[Any] ) -> str:
        __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : List[str] = True
        for model_class in self.all_model_classes:
            # request attentions via the inputs dict first
            __snake_case : Dict = True
            __snake_case : str = False
            __snake_case : Optional[int] = True
            __snake_case : Union[str, Any] = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __snake_case : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            __snake_case : str = outputs.attentions
            __snake_case : str = sum(self.model_tester.depths )
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __snake_case : int = True
            __snake_case : Tuple = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __snake_case : List[str] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            __snake_case : Dict = outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            # verify the first attentions (first block, first layer)
            __snake_case : Optional[Any] = (self.model_tester.image_size // 4) ** 2
            __snake_case : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            __snake_case : List[Any] = (self.model_tester.image_size // 32) ** 2
            __snake_case : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            __snake_case : Dict = len(lowerCamelCase )
            # Check attention is always last and order is fine
            __snake_case : Optional[int] = True
            __snake_case : Union[str, Any] = True
            __snake_case : int = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            self.assertEqual(out_len + 1 , len(lowerCamelCase ) )
            __snake_case : Optional[Any] = outputs.attentions
            self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
            # verify the first attentions (first block, first layer)
            __snake_case : Any = (self.model_tester.image_size // 4) ** 2
            __snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __snake_case ( self : Tuple ) -> Union[str, Any]:
    """Check that ``outputs.hidden_states`` has one entry per encoder block and
    that the first entry has shape (hidden_sizes[0], H/4, W/4).

    NOTE(review): local names in this file were machine-mangled — values are
    bound to ``__snake_case`` but read back through ``hidden_states`` /
    ``inputs_dict`` / ``lowerCamelCase`` (undefined here), and the nested
    helper declares three parameters sharing one name (a SyntaxError).
    Comments describe the apparent intent only; no code is changed.
    """

    def check_hidden_states_output(lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : str ):
        """Run one eval-mode forward pass and validate the hidden states."""
        __snake_case : Tuple = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        with torch.no_grad():
            __snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
        __snake_case : Tuple = outputs.hidden_states
        # One hidden-state tensor is expected per encoder block.
        __snake_case : int = self.model_tester.num_encoder_blocks
        self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
        # verify the first hidden states (first block)
        self.assertListEqual(
            list(hidden_states[0].shape[-3:] ) , [
                self.model_tester.hidden_sizes[0],
                self.model_tester.image_size // 4,
                self.model_tester.image_size // 4,
            ] , )

    __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        # First pass: request hidden states through the inputs dict.
        __snake_case : Optional[Any] = True
        check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # check that output_hidden_states also work using config
    del inputs_dict["output_hidden_states"]
    __snake_case : int = True
    check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[str] ) -> List[str]:
    """Smoke-test that each trainable model class can compute a loss and run
    ``loss.backward()`` on the shared tiny inputs.

    NOTE(review): mangled locals — the model is bound to ``__snake_case`` but
    used as ``model``/``loss``, and ``lowerCamelCase`` is undefined in this
    scope; upstream the loop skips classes without a trainable head.
    """
    if not self.model_tester.is_training:
        return
    __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
    __snake_case : int = True
    for model_class in self.all_model_classes:
        # Classes without a loss (e.g. bare backbones) are skipped.
        if model_class in get_values(lowerCamelCase ):
            continue
        __snake_case : Dict = model_class(lowerCamelCase )
        model.to(lowerCamelCase )
        model.train()
        __snake_case : int = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
        __snake_case : int = model(**lowerCamelCase ).loss
        loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : Tuple ) -> Optional[int]:
    """Common test intentionally disabled for this model (see skip reason)."""
    pass
@slow
def __snake_case ( self : int ) -> Dict:
    """Ensure the first published SegFormer checkpoint loads from the Hub.

    NOTE(review): mangled locals — the loaded model is bound to
    ``__snake_case`` while the assertion reads the undefined name
    ``lowerCamelCase``; the intent is to assert the loaded model is not None.
    """
    for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        __snake_case : Any = SegformerModel.from_pretrained(lowerCamelCase )
        self.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase_ ( ):
    """Open and return the COCO sample image shared by the integration tests.

    Bug fix: the original bound the opened image to a throwaway variable and
    then returned the undefined name ``image`` (NameError at call time).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class a (unittest.TestCase ):
    """Slow integration tests for SegFormer semantic-segmentation checkpoints.

    NOTE(review): locals in this file were machine-mangled — results are bound
    to ``__snake_case`` but later read through ``image_processor`` / ``model``
    / ``outputs`` / ``encoded_inputs`` / ``segmentation`` / ``lowerCamelCase``
    (all undefined in their scopes), so these tests raise NameError as
    written.  Comments below describe the apparent intent only.
    """

    @slow
    def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
        """b0/ADE20k: logits shape and a 3x3x3 slice vs. reference values."""
        # only resize + normalize
        __snake_case : Optional[int] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
        __snake_case : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            lowerCamelCase )
        __snake_case : Any = prepare_img()
        __snake_case : Optional[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
        __snake_case : Optional[int] = encoded_inputs.pixel_values.to(lowerCamelCase )
        with torch.no_grad():
            __snake_case : List[Any] = model(lowerCamelCase )
        # Logits come out at 1/4 of the 512x512 input resolution.
        __snake_case : Dict = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        __snake_case : Dict = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )

    @slow
    def __snake_case ( self : List[Any] ) -> Tuple:
        """b1/Cityscapes: same check with a looser tolerance (atol=1e-1)."""
        # only resize + normalize
        __snake_case : Optional[int] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
        __snake_case : List[str] = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(lowerCamelCase )
        __snake_case : int = prepare_img()
        __snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
        __snake_case : str = encoded_inputs.pixel_values.to(lowerCamelCase )
        with torch.no_grad():
            __snake_case : List[str] = model(lowerCamelCase )
        __snake_case : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        __snake_case : Tuple = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-1 ) )

    @slow
    def __snake_case ( self : str ) -> List[str]:
        """Post-processing: segmentation maps with and without target_sizes."""
        # only resize + normalize
        __snake_case : Optional[Any] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
        __snake_case : Dict = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            lowerCamelCase )
        __snake_case : Any = prepare_img()
        __snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
        __snake_case : Optional[Any] = encoded_inputs.pixel_values.to(lowerCamelCase )
        with torch.no_grad():
            __snake_case : int = model(lowerCamelCase )
        __snake_case : List[Any] = outputs.logits.detach().cpu()
        # With target_sizes the map is resized to the requested (500, 300).
        __snake_case : int = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(500, 300)] )
        __snake_case : Any = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase )
        # Without target_sizes the map keeps the logits resolution (128, 128).
        __snake_case : int = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
        __snake_case : Optional[Any] = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 81 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
    """Deprecated alias of ``DeiTImageProcessor``.

    Instantiating this class emits a ``FutureWarning`` and otherwise forwards
    all arguments unchanged to the parent image processor.
    """

    def __init__( self : List[str] , *args , **kwargs ) -> None:
        # Bug fixes: the original declared ``*lowerCamelCase`` and
        # ``**lowerCamelCase`` with the same name (a SyntaxError), and passed
        # the argument tuple where ``warnings.warn`` expects a warning
        # category — the deprecation category is ``FutureWarning``.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 81 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : str = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = SpeechTaTokenizer
__UpperCAmelCase : Any = False
__UpperCAmelCase : str = True
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
    """Build the SpeechT5 fixture tokenizer, register ``<mask>`` and
    ``<ctc_blank>``, and save it to ``self.tmpdirname`` for reload tests.

    NOTE(review): mangled locals — the tokenizer and mask token are bound to
    ``__snake_case`` but later used as ``tokenizer``/``mask_token``, and the
    ``SpeechTaTokenizer(...)`` argument (presumably the module-level fixture
    model path) is the undefined name ``lowerCamelCase``.
    """
    super().setUp()
    # We have a SentencePiece fixture for testing
    __snake_case : Tuple = SpeechTaTokenizer(lowerCamelCase )
    __snake_case : Tuple = AddedToken("<mask>" , lstrip=lowerCamelCase , rstrip=lowerCamelCase )
    __snake_case : Optional[Any] = mask_token
    tokenizer.add_special_tokens({"mask_token": mask_token} )
    tokenizer.add_tokens(["<ctc_blank>"] )
    tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Tuple , lowerCamelCase : Optional[int] ) -> Union[str, Any]:
    """Return the (input_text, output_text) pair used for round-trip tests.

    The tokenizer argument is required by the mixin's expected signature and
    is unused here because the fixture pair is constant.

    Bug fix: the original bound both strings to throwaway variables and then
    returned the undefined names ``input_text``/``output_text`` (NameError).
    """
    input_text = "this is a test"
    output_text = "this is a test"
    return input_text, output_text
def __snake_case ( self : Optional[int] , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Optional[int]:
    """Encode the fixture sentence and return the (decoded_text, ids) pair.

    Bug fixes: the original declared all four parameters with the single name
    ``lowerCamelCase`` (a SyntaxError) and returned the undefined names
    ``text``/``ids``.  ``with_prefix_space``/``max_length``/``min_length``
    are accepted only for signature compatibility with the tokenizer test
    mixin and are unused here.
    """
    input_text, output_text = self.get_input_output_texts(tokenizer )
    ids = tokenizer.encode(output_text , add_special_tokens=False )
    text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
    return text, ids
def __snake_case ( self : Any ) -> List[str]:
    """Check the ``<pad>`` token <-> id 1 round-trip conversion.

    Bug fix: the original bound the fixtures to throwaway variables and then
    passed the undefined name ``lowerCamelCase`` to the assertions.
    """
    token = "<pad>"
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __snake_case ( self : Optional[int] ) -> Optional[int]:
    """Spot-check the ordering and total size of the fixture vocabulary.

    Bug fix: the original bound the key list to a throwaway variable and then
    indexed the undefined name ``vocab_keys`` (and measured the undefined
    ``lowerCamelCase``) — NameError as written.
    """
    vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , "<s>" )
    self.assertEqual(vocab_keys[1] , "<pad>" )
    self.assertEqual(vocab_keys[-4] , "œ" )
    self.assertEqual(vocab_keys[-2] , "<mask>" )
    self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
    self.assertEqual(len(vocab_keys ) , 81 )
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
    """The fixture tokenizer exposes 79 base entries (before added tokens)."""
    tokenizer = self.get_tokenizer()
    self.assertEqual(tokenizer.vocab_size , 79 )
def __snake_case ( self : Union[str, Any] ) -> int:
    """Exercise ``add_tokens``/``add_special_tokens``: vocabulary growth and
    encoding of the newly added (special) tokens.

    NOTE(review): mangled locals throughout — sizes/ids are bound to
    ``__snake_case`` but compared via ``lowerCamelCase`` / ``all_size`` /
    ``all_size_a`` / ``tokens`` (undefined in this scope), so the test
    raises NameError as written.  Comments describe the apparent intent.
    """
    __snake_case : Optional[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase )
    for tokenizer in tokenizers:
        with self.subTest(F'{tokenizer.__class__.__name__}' ):
            # Baseline sizes before adding anything.
            __snake_case : Optional[Any] = tokenizer.vocab_size
            __snake_case : List[Any] = len(lowerCamelCase )
            self.assertNotEqual(lowerCamelCase , 0 )
            # We usually have added tokens from the start in tests because our vocab fixtures are
            # smaller than the original vocabs - let's not assert this
            # self.assertEqual(vocab_size, all_size)
            # Adding two regular tokens must grow the total size by two.
            __snake_case : List[Any] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
            __snake_case : int = tokenizer.add_tokens(lowerCamelCase )
            __snake_case : Optional[int] = tokenizer.vocab_size
            __snake_case : List[Any] = len(lowerCamelCase )
            self.assertNotEqual(lowerCamelCase , 0 )
            self.assertEqual(lowerCamelCase , lowerCamelCase )
            self.assertEqual(lowerCamelCase , len(lowerCamelCase ) )
            self.assertEqual(lowerCamelCase , all_size + len(lowerCamelCase ) )
            # New tokens must encode to ids above the base vocabulary.
            __snake_case : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowerCamelCase )
            self.assertGreaterEqual(len(lowerCamelCase ) , 4 )
            self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
            self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
            # Same exercise for special tokens (eos / pad replacements).
            __snake_case : Union[str, Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
            __snake_case : Optional[int] = tokenizer.add_special_tokens(lowerCamelCase )
            __snake_case : Dict = tokenizer.vocab_size
            __snake_case : Optional[int] = len(lowerCamelCase )
            self.assertNotEqual(lowerCamelCase , 0 )
            self.assertEqual(lowerCamelCase , lowerCamelCase )
            self.assertEqual(lowerCamelCase , len(lowerCamelCase ) )
            self.assertEqual(lowerCamelCase , all_size_a + len(lowerCamelCase ) )
            __snake_case : Optional[int] = tokenizer.encode(
                ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowerCamelCase )
            self.assertGreaterEqual(len(lowerCamelCase ) , 6 )
            self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
            self.assertGreater(tokens[0] , tokens[1] )
            self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
            self.assertGreater(tokens[-3] , tokens[-4] )
            self.assertEqual(tokens[0] , tokenizer.eos_token_id )
            self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __snake_case ( self : Optional[int] ) -> Optional[int]:
    """Intentionally overridden as a no-op: not applicable to this tokenizer."""
    pass
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
    """Intentionally overridden as a no-op: not applicable to this tokenizer."""
    pass
def __snake_case ( self : List[Any] ) -> Optional[Any]:
    """Character-level tokenization round-trip: tokenize, map to ids, and map
    back (out-of-vocabulary "92000" becomes id 3 / token "<unk>").

    NOTE(review): mangled locals — token/id lists are bound to
    ``__snake_case`` but asserted via the undefined name ``lowerCamelCase``;
    the expected lists themselves look intact.
    """
    __snake_case : Union[str, Any] = self.get_tokenizer()
    __snake_case : Union[str, Any] = tokenizer.tokenize("This is a test" )
    # fmt: off
    self.assertListEqual(lowerCamelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
    # fmt: on
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
    __snake_case : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
    self.assertListEqual(
        lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
    __snake_case : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase )
    # fmt: off
    self.assertListEqual(lowerCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
    # fmt: on
    __snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase )
    self.assertListEqual(
        lowerCamelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def __snake_case ( self : Dict ) -> Dict:
    """Full-pipeline integration test against the microsoft/speecht5_asr
    checkpoint: the fixture sentences must encode to the pinned
    ``input_ids``/``attention_mask`` below (data generated upstream; the
    trailing 1s/0s pad the shorter sequences to the longest one).

    NOTE(review): mangled locals — the sequences and expected encoding are
    bound to ``__snake_case`` but passed to the helper via the undefined
    name ``lowerCamelCase``.
    """
    # Use custom sequence because this tokenizer does not handle numbers.
    __snake_case : str = [
        "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
        "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
        "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
        "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
        "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
        "conditioning on both left and right context in all layers.",
        "The quick brown fox jumps over the lazy dog.",
    ]
    # fmt: off
    __snake_case : List[Any] = {
        "input_ids": [
            [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
            [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ],
        "attention_mask": [
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
    }
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=lowerCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowerCamelCase , )
| 81 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Bug fixes (this module previously failed at import time):
#  * the import-structure dict was bound to a throwaway global while the
#    _LazyModule(...) call below read the undefined name ``_import_structure``;
#  * the optional vision/torch submodule lists were bound to throwaway
#    globals instead of being registered in the import structure;
#  * the lazy module object was assigned to a module global instead of being
#    installed in ``sys.modules`` for this module's name.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Image-processing submodules require the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only; at runtime the lazy module
    # installed below resolves these attributes on demand.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so optional heavy backends are
    # only imported when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


# Bug fixes: the logger and the pretrained-config map were both bound to one
# name (the map silently clobbered the logger), every __init__ parameter
# shared the single name ``lowerCamelCase`` (a SyntaxError), and the body read
# names that were therefore undefined.  Parameter names are restored from the
# attributes they populate; the base class is the imported PretrainedConfig.
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class a (PretrainedConfig ):
    """Configuration for a VAN (Visual Attention Network) model.

    Stores per-stage architecture hyper-parameters (patch sizes, strides,
    hidden sizes, depths, MLP ratios) plus activation/regularization settings,
    and forwards any extra keyword arguments to ``PretrainedConfig``.
    """

    model_type = "van"

    def __init__(
        self ,
        image_size=224 ,
        num_channels=3 ,
        # List defaults are kept for parity with the original literals; they
        # are only read, never mutated.
        patch_sizes=[7, 3, 3, 3] ,
        strides=[4, 2, 2, 2] ,
        hidden_sizes=[64, 128, 320, 512] ,
        depths=[3, 3, 12, 3] ,
        mlp_ratios=[8, 8, 4, 4] ,
        hidden_act="gelu" ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-6 ,
        layer_scale_init_value=1E-2 ,
        drop_path_rate=0.0 ,
        dropout_rate=0.0 ,
        **kwargs ,
    ) -> int:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 81 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


# Bug fixes (this span is a byte-identical duplicate of the block above and
# receives the same repair): self-clobbering module globals, duplicate
# ``lowerCamelCase`` parameter names (a SyntaxError), and undefined names in
# the constructor body.
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class a (PretrainedConfig ):
    """Configuration for a VAN (Visual Attention Network) model.

    Stores per-stage architecture hyper-parameters (patch sizes, strides,
    hidden sizes, depths, MLP ratios) plus activation/regularization settings,
    and forwards any extra keyword arguments to ``PretrainedConfig``.
    """

    model_type = "van"

    def __init__(
        self ,
        image_size=224 ,
        num_channels=3 ,
        # List defaults are kept for parity with the original literals; they
        # are only read, never mutated.
        patch_sizes=[7, 3, 3, 3] ,
        strides=[4, 2, 2, 2] ,
        hidden_sizes=[64, 128, 320, 512] ,
        depths=[3, 3, 12, 3] ,
        mlp_ratios=[8, 8, 4, 4] ,
        hidden_act="gelu" ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-6 ,
        layer_scale_init_value=1E-2 ,
        drop_path_rate=0.0 ,
        dropout_rate=0.0 ,
        **kwargs ,
    ) -> int:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 81 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a (_lowerCAmelCase ):
    """Config tester: checks that MobileViTConfig exposes the expected fields.

    NOTE(review): mangled locals — the constructed config is bound to
    ``__snake_case`` while the ``hasattr`` checks read the undefined name
    ``lowerCamelCase``; the intent is to probe the freshly built config.
    """

    def __snake_case ( self : str ) -> str:
        __snake_case : Tuple = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowerCamelCase , "neck_hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
class a :
    """Builds tiny MobileViT configs/inputs and shape-checks model outputs.

    NOTE(review): this file's locals were machine-mangled — several method
    signatures reuse the single parameter name ``lowerCamelCase`` (a
    SyntaxError), and results are bound to ``__snake_case`` but read back
    through upstream names (``parent``, ``model``, ``config_and_inputs``,
    ...), which are undefined.  Comments describe apparent intent only.
    """

    def __init__( self : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Tuple=13 , lowerCamelCase : str=32 , lowerCamelCase : Dict=2 , lowerCamelCase : List[str]=3 , lowerCamelCase : Any=640 , lowerCamelCase : Optional[Any]=4 , lowerCamelCase : Tuple="silu" , lowerCamelCase : int=3 , lowerCamelCase : Dict=32 , lowerCamelCase : str=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : int=None , ) -> str:
        # Positional defaults apparently map to: batch_size=13, image_size=32,
        # patch_size=2, num_channels=3, last_hidden_size=640,
        # num_attention_heads=4, hidden_act="silu", conv_kernel_size=3,
        # output_stride=32, dropout probs=0.1, initializer_range=0.02,
        # use_labels/is_training=True, num_labels=10, scope=None.
        __snake_case : Optional[Any] = parent
        __snake_case : Optional[Any] = batch_size
        __snake_case : Any = image_size
        __snake_case : List[Any] = patch_size
        __snake_case : Any = num_channels
        __snake_case : Union[str, Any] = last_hidden_size
        __snake_case : Any = num_attention_heads
        __snake_case : Any = hidden_act
        __snake_case : Tuple = conv_kernel_size
        __snake_case : Any = output_stride
        __snake_case : Any = hidden_dropout_prob
        __snake_case : List[Any] = attention_probs_dropout_prob
        __snake_case : Optional[Any] = classifier_dropout_prob
        __snake_case : Union[str, Any] = use_labels
        __snake_case : Optional[int] = is_training
        __snake_case : Dict = num_labels
        __snake_case : Any = initializer_range
        __snake_case : Optional[int] = scope

    def __snake_case ( self : str ) -> Union[str, Any]:
        """Create random pixel values (and labels when use_labels is set)."""
        __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case : List[Any] = None
        __snake_case : Optional[int] = None
        if self.use_labels:
            __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
            __snake_case : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __snake_case : Optional[Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def __snake_case ( self : Any ) -> Union[str, Any]:
        """Build a MobileViTConfig from the tester's hyper-parameters."""
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def __snake_case ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ) -> Dict:
        """Base model: last hidden state is (B, C, H/stride, W/stride)."""
        __snake_case : List[Any] = MobileViTModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : List[str] = model(lowerCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def __snake_case ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> List[str]:
        """Image-classification head: logits are (B, num_labels)."""
        __snake_case : str = self.num_labels
        __snake_case : List[Any] = MobileViTForImageClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict ) -> Dict:
        """Segmentation head: logits are (B, num_labels, H/stride, W/stride),
        with and without labels supplied."""
        __snake_case : Union[str, Any] = self.num_labels
        __snake_case : Optional[int] = MobileViTForSemanticSegmentation(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        __snake_case : Tuple = model(lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        __snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def __snake_case ( self : Optional[int] ) -> List[Any]:
        """Return (config, {"pixel_values": ...}) for the common test mixin."""
        __snake_case : Optional[Any] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
        __snake_case : Dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model tests for MobileViT.

    NOTE(review): reconstructed from a scrambled original.  The two
    `_lowerCAmelCase` bases presumably stand for ModelTesterMixin and
    PipelineTesterMixin — confirm against this file's imports.  All class
    attributes were named `__UpperCAmelCase` (each shadowing the previous) and
    all methods `__snake_case` (likewise); the conventional transformers names
    have been restored so the mixins and unittest discovery can find them.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Four boolean flags were all `False` in the original; names presumed from
    # the common test-mixin contract — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self )
        # has_text_modality=False: MobileViT is vision-only, so the config tester
        # must not look for text attributes (presumed — confirm).
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions" )
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_outputs_equivalence(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output(self):
        # The scrambled inner def repeated `lowerCamelCase` three times — a
        # SyntaxError; distinct parameter names restored from the call sites.
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """Load the COCO fixture image used by the slow integration tests below.

    Name restored from the call sites (`prepare_img()` in the tests); the
    scrambled def was named `lowerCAmelCase_`, leaving those calls dangling.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a (unittest.TestCase ):
    """Slow integration tests running pretrained MobileViT checkpoints on a fixture image.

    NOTE(review): the scrambled original used the undefined name `lowerCamelCase`
    wherever a device was needed; restored to `torch_device` (presumably imported
    from transformers.testing_utils — confirm).  Test-method names restored so
    unittest discovery works (the scrambled methods all shadowed one another).
    """

    @cached_property
    def default_image_processor(self):
        # Name restored from the call site `self.default_image_processor` below.
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        # With an explicit target size, the segmentation map is resized to it.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        # Without target_sizes, the map keeps the model's output resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_snake_case : Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
    """Parse command-line arguments for the TFRecord shard-preparation script.

    The scrambled original passed the undefined name `__lowerCamelCase` as
    every `type=`/`default=`; restored to the obvious str/int/None values.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Return a `datasets.map`-compatible closure tokenizing the "text" column.

    `__lowerCamelCase` is the tokenizer (a callable over a batch of strings).
    The scrambled original's closure read the undefined name `tokenizer`;
    it is now bound from the parameter.
    """
    tokenizer = __lowerCamelCase

    def fn(examples ):
        return tokenizer(examples["text"] )

    return fn
def lowerCAmelCase_ ( tokenized_data ):
    """Serialize tokenized samples into a list of `tf.train.Example` byte strings.

    Parameter renamed to `tokenized_data` (the scrambled body already read that
    name, which was otherwise undefined).  The tf.train feature API is
    `int64_list=tf.train.Int64List(...)` — the scrambled `intaa_list`/`IntaaList`
    spellings do not exist in TensorFlow.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def lowerCAmelCase_ ( args ):
    """Tokenize, chunk into fixed-length blocks, and write the dataset as TFRecord shards.

    Parameter renamed to `args` (the scrambled body already read `args.*`).
    Fixes: `batched=__lowerCamelCase` -> `batched=True`; the GCS check used
    a substring test (`"gs" not in args.output_dir`) which misfires on any
    local path containing "gs" — replaced with a `gs://` prefix test.
    """
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if not args.output_dir.startswith("gs://" ):
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )  # NOTE(review): dangling name in the scrambled file — presumably the factory above
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , f"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )  # NOTE(review): dangling name — presumably the serializer above
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt" , "w" ) as f:
        print(f"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    # NOTE(review): every def in this script is named `lowerCAmelCase_` (each
    # shadowing the last), so `parse_args` and `main` are dangling here and
    # this guard would raise NameError — presumably the originals were
    # parse_args/tokenize_function/get_serialized_examples/main; confirm and
    # restore the definitions' names.  Also note the parsed namespace is bound
    # to `_snake_case`, yet `main` is called with the undefined name `args`.
    _snake_case : List[Any] = parse_args()
    main(args)
| 81 | 1 |
from __future__ import annotations
from collections import namedtuple
def lowerCAmelCase_ ( voltage , current , power ):
    """Solve one of (voltage, current, power) from the other two via P = V * I.

    Exactly one argument must be 0; that quantity is computed and returned as a
    namedtuple ``result(name, value)``.  Parameter names restored: the scrambled
    signature repeated `__lowerCamelCase` three times (a SyntaxError) while the
    body already read voltage/current/power.

    Raises:
        ValueError: if not exactly one argument is 0, or if power < 0.
    """
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        # Unreachable: the count check above guarantees exactly one zero.
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    import doctest

    # Run any doctest examples in this module (the function above currently has
    # no doctest examples, so this is a no-op until some are added).
    doctest.testmod()
| 81 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Constant names restored: the fixtures below read FILE_PATH and the fsspec
# test compares against FILE_CONTENT, but the scrambled original assigned both
# values to `_snake_case` (the second shadowing the first).
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( tmp_path_factory ):
    """Session fixture: write FILE_CONTENT zstd-compressed and return its path.

    Parameter restored to `tmp_path_factory` — pytest injects fixtures by
    parameter name, and the body already read that (otherwise undefined) name.
    """
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def lowerCAmelCase_ ( tmpfs ):
    """Write FILE_CONTENT into the mock `tmp://` filesystem; return its in-fs path.

    Parameter restored to `tmpfs` (the body already read that name).
    """
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase_ ( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    """Extracting a compressed file through cached_path yields the original text.

    Distinct parameter names restored (the scrambled signature repeated
    `__lowerCamelCase` six times — a SyntaxError); names recovered from the
    free names the body already read.  `extract_compressed_file=True` is
    presumed from the test's intent — confirm.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase_ ( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    """Extracted files land under the expected (cache_dir, extracted_dir) pair.

    Distinct parameter names restored (duplicate `__lowerCamelCase` params were
    a SyntaxError); recovered from the free names the body already read and
    the parametrize markers above.
    """
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def lowerCAmelCase_ ( text_file ):
    """cached_path returns existing local paths unchanged (absolute and relative).

    Parameter restored to `text_file` — the scrambled body compared against
    that (otherwise undefined) name, so the assignments presumably rebound it.
    """
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def lowerCAmelCase_ ( tmp_path ):
    """cached_path raises for missing local paths (absolute and relative).

    Parameter restored to `tmp_path` (the body read it as a free name).  The
    expected exception type was scrambled away; FileNotFoundError is presumed
    for a missing local file — confirm against datasets' cached_path contract.
    """
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def lowerCAmelCase_ ( tmpfs_file ):
    """get_from_cache resolves tmp:// (fsspec) URLs and caches their content.

    Parameter restored to `tmpfs_file` (the body read it as a free name); this
    also re-enables injection of the tmpfs fixture above.
    """
    output_file = get_from_cache(f"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( ):
    """With HF_DATASETS_OFFLINE enabled, cached_path refuses remote URLs.

    The scrambled original patched with and raised the undefined name
    `__lowerCamelCase`; restored to True and OfflineModeIsEnabled (the latter
    is imported at the top of this file).
    """
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With HF_DATASETS_OFFLINE enabled, http_get/http_head raise OfflineModeIsEnabled.

    Parameter restored to `tmp_path_factory` (the body called its `mktemp`);
    patch value True and exception type restored from the imports/intent.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With HF_DATASETS_OFFLINE enabled, ftp_get/ftp_head raise OfflineModeIsEnabled.

    Parameter, patch value and exception type restored as in the http test above.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def lowerCAmelCase_ ( tmp_path_factory ):
    """With HF_DATASETS_OFFLINE enabled, fsspec_get/fsspec_head raise OfflineModeIsEnabled.

    Parameter, patch value and exception type restored as in the http test above.
    """
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 81 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_snake_case : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a (Pipeline ):
    """Video classification pipeline: samples frames from a video and classifies it.

    Reconstructed from a scrambled original: the decorator argument and base
    class were the undefined name `_lowerCAmelCase` (PIPELINE_INIT_ARGS and
    Pipeline are imported at the top of this file); `*lowerCamelCase,
    **lowerCamelCase` duplicated a parameter name (a SyntaxError); method names
    restored to the Pipeline contract (_sanitize_parameters / preprocess /
    _forward / postprocess) since the base class dispatches to them by name.
    """

    def __init__(self, *args, **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "decord" )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None ):
        # Split user kwargs into per-stage parameter dicts.
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs ):
        return super().__call__(videos , **kwargs )

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        # Remote videos are fetched into memory before decoding.
        if video.startswith("http://" ) or video.startswith("https://" ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        # Evenly spaced frame indices over the sampled window.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        # np.intaa in the scrambled source does not exist; int64 restored.
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs

    def _forward(self, model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self, model_outputs, top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        # config.id2label maps class index -> label string ("idalabel" was scrambled).
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 81 |
_snake_case : Optional[int] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_snake_case : Dict = ["a", "b", "c", "d", "e"]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = start
# add current to visited
visited.append(__lowerCamelCase )
__snake_case : List[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__snake_case : Tuple = topological_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCamelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
__snake_case : int = topological_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
_snake_case : List[Any] = topological_sort("a", [], [])
print(sort)
| 81 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.