| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
import math


def jump_search(arr, x) -> int:
    """Block search on a sorted list: jump ahead in sqrt(n) steps, then scan linearly."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
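A quick sanity check of the routine above; the list and targets are illustrative values, not part of the original snippet:

# jump_search expects sorted input; the expected indices were verified by hand.
assert jump_search([1, 3, 5, 7, 9, 11], 7) == 3
assert jump_search([1, 3, 5, 7, 9, 11], 4) == -1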
code_codestyle: 307
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # The two boolean flags below were anonymized in the dump; names restored
    # best-effort from the upstream diffusers test file.
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
style_context_codestyle: 307
label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
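A usage note (my sketch, not part of the file): the lazy structure above makes importing the package cheap, and the heavy torch/TF submodules only load when an attribute is first touched:

# Hypothetical consumer code: the first attribute access triggers the real import.
from transformers.models import speech_to_text

config_cls = speech_to_text.Speech2TextConfig  # loads configuration_speech_to_text here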
code_codestyle: 307
# Dummy objects raised when torch, transformers, or onnx is missing. The class
# names were anonymized in the dump; they are restored best-effort from the
# upstream diffusers file dummy_torch_and_transformers_and_onnx_objects.py.
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
style_context_codestyle: 307
label: 1
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
code_codestyle: 307
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
style_context_codestyle: 307
label: 1
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent) -> bool:
    # Breadth-first search over the residual graph; records the search tree in
    # `parent` and returns True if the sink `t` is reachable from the source `s`.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink) -> list:
    """Returns the edges of the minimum cut found by the Ford-Fulkerson method."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record the original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Saturated edges (zero residual capacity, positive original capacity) form the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
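A small check of the routine above; the expected edges were verified by hand-tracing the BFS-based Ford-Fulkerson (Edmonds-Karp) iterations, and the function consumes its graph in place, so pass a copy:

cut_edges = mincut([row[:] for row in test_graph], source=0, sink=5)
assert cut_edges == [(1, 3), (4, 3), (4, 5)]  # capacities 12 + 7 + 4 = 23, the max flow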
code_codestyle: 307
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
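A minimal instantiation sketch (my example, assuming the defaults above; the printed values follow directly from the conv_stride defaults):

config = UniSpeechConfig()
print(config.num_feat_extract_layers)  # 7 convolutional feature-extractor layers
print(config.inputs_to_logits_ratio)   # 320 == 5 * 2**6, the total stride of the feature extractor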
style_context_codestyle: 307
label: 1
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
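The brute-force sum can be cross-checked in O(1) with the arithmetic-series formula; a sketch (the helper names are mine, not from the source):

def solution_closed_form(n: int = 1000) -> int:
    # sum of the multiples of k below n: k * m * (m + 1) // 2, where m = (n - 1) // k
    def multiples_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    # inclusion-exclusion over multiples of 3 and 5 (multiples of 15 are counted twice)
    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


assert solution_closed_form() == solution() == 233168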
code_codestyle: 307
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        Params:
            hidden :: [len, bsz, d_proj]
            labels :: [len, bsz]

        Return:
            if labels is None: log probabilities of tokens over the vocabulary,
            else: the flattened per-token negative log-likelihoods.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
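A shape-level usage sketch for the module above; the hyperparameters and labels are illustrative, not from the source:

# Two clusters beyond the shortlist: token ids [0, 100), [100, 500), [500, 1000)
crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
hidden = torch.randn(1, 8, 64)                             # (batch, seq_len, d_proj)
labels = torch.tensor([[1, 2, 150, 160, 600, 700, 3, 4]])  # next-token targets
nll = crit(hidden, labels)                                 # per-token negative log-likelihoods
print(nll.shape)                                           # torch.Size([7]) == batch * (seq_len - 1)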
style_context_codestyle: 307
label: 1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


# Anonymized identifiers and boolean arguments below are restored best-effort
# from the upstream diffusers test file for UnCLIPImageVariationPipeline.
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure manually passing the image embeddings yields an identical result
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# The check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
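# Illustrative usage sketch (mirrors the slow test above; variable names are
# placeholders, and fp16 weights are assumed to be available for this checkpoint):
#   image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
#   pipe = UnCLIPImageVariationPipeline.from_pretrained("kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
#   out = pipe(image, generator=torch.Generator("cpu").manual_seed(0), output_type="np").images[0]  # shape (256, 256, 3)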
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of each test class name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
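# Illustrative example only (the rows below are made up): render a small failure
# table with the Slack-friendly TableFormat constructed at the top of this script.
example_rows = [["tests/test_big_modeling.py", 2], ["tests/test_utils.py", 1]]
print(tabulate(example_rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right"))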
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Any:
lowerCAmelCase__ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : List[Any] = ''
else:
lowerCAmelCase__ : Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Tuple = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowerCAmelCase__ : Tuple = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
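# Shape sketch of the split performed above (H = config.hidden_size):
#   timm qkv weight (3H, H) -> query = rows 0:H, key = rows H:2H, value = rows 2H:3H
#   timm qkv bias   (3H,)   -> split the same way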
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Tuple = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
lowerCAmelCase__ : List[str] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = val
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ : Tuple = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : int = ViTConfig()
lowerCAmelCase__ : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Optional[int] = int(vit_name[-12:-10] )
lowerCAmelCase__ : List[Any] = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ : int = 1_000
lowerCAmelCase__ : Union[str, Any] = 'huggingface/label-files'
lowerCAmelCase__ : List[Any] = 'imagenet-1k-id2label.json'
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ : Tuple = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = int(vit_name[-6:-4] )
lowerCAmelCase__ : int = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
lowerCAmelCase__ : Dict = 192
lowerCAmelCase__ : List[Any] = 768
lowerCAmelCase__ : Union[str, Any] = 12
lowerCAmelCase__ : Union[str, Any] = 3
elif vit_name[9:].startswith('small' ):
lowerCAmelCase__ : int = 384
lowerCAmelCase__ : List[str] = 1_536
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : int = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
lowerCAmelCase__ : Dict = 768
lowerCAmelCase__ : List[str] = 2_304
lowerCAmelCase__ : Optional[int] = 8
lowerCAmelCase__ : Union[str, Any] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
lowerCAmelCase__ : Optional[Any] = 1_024
lowerCAmelCase__ : Dict = 4_096
lowerCAmelCase__ : Any = 24
lowerCAmelCase__ : Union[str, Any] = 16
elif vit_name[4:].startswith('huge' ):
lowerCAmelCase__ : int = 1_280
lowerCAmelCase__ : List[str] = 5_120
lowerCAmelCase__ : Tuple = 32
lowerCAmelCase__ : List[str] = 16
# load original model from timm
lowerCAmelCase__ : Any = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : str = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Tuple = ViTModel(SCREAMING_SNAKE_CASE_ ).eval()
else:
lowerCAmelCase__ : Union[str, Any] = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ : str = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ : List[Any] = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ : str = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCAmelCase__ : List[Any] = encoding['pixel_values']
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ )
if base_model:
lowerCAmelCase__ : Optional[int] = timm_model.forward_features(SCREAMING_SNAKE_CASE_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ : Optional[int] = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
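# Example invocation (illustrative; the script name and output path are placeholders):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224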
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test that get_neighbors_pixel() does not return None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
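# Minimal sketch (not the library implementation exercised above): a local binary
# pattern packs the eight centre-vs-neighbour comparisons into one byte, which is
# the per-pixel value the nested loop above accumulates.
def _lbp_demo(center: int, neighbors: list) -> int:
    assert len(neighbors) == 8, "expects the 8-pixel neighbourhood"
    return sum(1 << k for k, n in enumerate(neighbors) if n >= center)

# e.g. a flat patch where every neighbour equals the centre yields 0b11111111 == 255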
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list:
lowerCAmelCase__ : int = word.split()
def justify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Any = max_width - width
lowerCAmelCase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
# if there is only one word in the line,
# pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ : Any = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute the leftover spaces round-robin, starting from the leftmost gaps
for i in range(SCREAMING_SNAKE_CASE_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ : List[Any] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : list[str] = []
lowerCAmelCase__ : str = 0
for word in words:
if width + len(SCREAMING_SNAKE_CASE_ ) + len(SCREAMING_SNAKE_CASE_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE_ )
width += len(SCREAMING_SNAKE_CASE_ )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# reset new line and new width
lowerCAmelCase__ , lowerCAmelCase__ : Any = [word], len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = max_width - width - len(SCREAMING_SNAKE_CASE_ )
answer.append(' '.join(SCREAMING_SNAKE_CASE_ ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
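# Illustrative result (the name `justify_text` is hypothetical; it stands for the
# de-obfuscated helper above, which splits its first argument on whitespace and
# fully justifies each line to the requested width):
#   justify_text("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']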
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
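# Usage note (illustrative, based on how transformers' _LazyModule behaves):
# `from transformers.models.blip import BlipModel` resolves lazily, so
# modeling_blip (and therefore torch) is only imported at the point of attribute
# access, keeping the top-level `import transformers` cheap.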
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConditionalDetrFeatureExtractor"""]
lowerCamelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
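# Worked illustration (a plain-float sketch, not part of the public API): this
# mirrors the "cosine" branch above so the betas can be inspected without torch.
# Each beta is min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta) and increases
# monotonically toward max_beta as alpha_bar decays.
def _cosine_betas_demo(num_diffusion_timesteps: int = 5, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas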
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
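# Minimal sketch (independent of the scheduler class above) of the two-stage
# update that `step` spreads across consecutive calls: an Euler prediction
# followed by a trapezoidal correction that averages the two derivatives.
def _heun_step_demo(sample, derivative_fn, sigma, sigma_next):
    dt = sigma_next - sigma
    d1 = derivative_fn(sample, sigma)            # first-order slope
    prediction = sample + d1 * dt                # Euler step to sigma_next
    d2 = derivative_fn(prediction, sigma_next)   # slope at the prediction
    return sample + 0.5 * (d1 + d2) * dt         # Heun / trapezoidal update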
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
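# Illustrative sketch (the cutoff values below are example numbers): how
# `cutoffs` (with n_token appended, as in __init__ above) bucket a token id into
# the frequent-word head shortlist or one of the tail clusters.
def _cluster_for_token(token_id: int, cutoffs: list) -> int:
    for cluster, cutoff in enumerate(cutoffs):
        if token_id < cutoff:
            return cluster  # 0 == head shortlist, > 0 == tail cluster
    raise ValueError("token_id is outside the vocabulary")

# _cluster_for_token(5, [2_000, 10_000, 267_735]) -> 0 (head)
# _cluster_for_token(50_000, [2_000, 10_000, 267_735]) -> 2 (second tail cluster)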
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
        out_a = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=a , pixel_values=a , attention_mask=a )
            out_b = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_a - out_b ) )
            self.assertLessEqual(max_diff , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def _lowerCamelCase ( self : List[Any] , a : np.ndarray , b : np.ndarray , tol : float ):
        '''simple docstring'''
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs )
        out_a = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname )
            model_b = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_b(**inputs )
            out_b = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_a - out_b ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
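# Minimal inference sketch mirroring the integration test above; the checkpoint name and
# processor usage come from the test itself, the final comment is an illustrative addition:
#   model = TFVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True)
#   processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
#   inputs = processor(text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')
#   logits = model(**inputs).logits_per_image  # shape (num_images, num_texts)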
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ) -> str:
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return '.'.join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
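# Example of a path dict consumed above, as produced by renew_resnet_paths():
#   {'old': 'input_blocks.1.0.in_layers.0.weight', 'new': 'input_blocks.1.0.norm1.weight'}
# assign_to_checkpoint then applies the mid-block renames, any additional replacements,
# and copies the tensor from old_checkpoint into checkpoint under the new key.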
def convert_ldm_checkpoint(checkpoint , config ) -> dict:
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Union[str, Any] = checkpoint['time_embed.0.weight']
lowerCAmelCase__ : Union[str, Any] = checkpoint['time_embed.0.bias']
lowerCAmelCase__ : Dict = checkpoint['time_embed.2.weight']
lowerCAmelCase__ : Tuple = checkpoint['time_embed.2.bias']
lowerCAmelCase__ : Optional[Any] = checkpoint['input_blocks.0.0.weight']
lowerCAmelCase__ : List[str] = checkpoint['input_blocks.0.0.bias']
lowerCAmelCase__ : Tuple = checkpoint['out.0.weight']
lowerCAmelCase__ : List[str] = checkpoint['out.0.bias']
lowerCAmelCase__ : Dict = checkpoint['out.2.weight']
lowerCAmelCase__ : List[str] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
lowerCAmelCase__ : Dict = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
lowerCAmelCase__ : List[str] = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase__ : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
lowerCAmelCase__ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase__ : int = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
lowerCAmelCase__ : Any = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[int] = (i - 1) // (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Tuple = (i - 1) % (config['num_res_blocks'] + 1)
lowerCAmelCase__ : List[Any] = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowerCAmelCase__ : str = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowerCAmelCase__ : Tuple = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowerCAmelCase__ : Any = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowerCAmelCase__ : Dict = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowerCAmelCase__ : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCAmelCase__ : List[str] = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Optional[Any] = middle_blocks[0]
lowerCAmelCase__ : Optional[Any] = middle_blocks[1]
lowerCAmelCase__ : Union[str, Any] = middle_blocks[2]
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = i // (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Any = i % (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Optional[int] = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]]
lowerCAmelCase__ : Union[str, Any] = {}
for layer in output_block_layers:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = layer.split('.' )[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Any = [layer_name]
if len(SCREAMING_SNAKE_CASE_ ) > 1:
lowerCAmelCase__ : str = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowerCAmelCase__ : Any = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowerCAmelCase__ : Any = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase__ : Union[str, Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
lowerCAmelCase__ : Optional[int] = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowerCAmelCase__ : Dict = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE_ ) == 2:
lowerCAmelCase__ : str = []
if len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : str = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCAmelCase__ : str = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , )
else:
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase__ : Any = '.'.join(['output_blocks', str(SCREAMING_SNAKE_CASE_ ), path['old']] )
lowerCAmelCase__ : Optional[int] = '.'.join(['up_blocks', str(SCREAMING_SNAKE_CASE_ ), 'resnets', str(SCREAMING_SNAKE_CASE_ ), path['new']] )
lowerCAmelCase__ : str = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCamelCase__ = json.loads(f.read())
lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCamelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCamelCase__ = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
lowerCamelCase__ = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
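# Example invocation (script and file names are illustrative):
#   python convert_ldm_checkpoint.py --checkpoint_path ./model.ckpt \
#       --config_file ./config.json --dump_path ./converted_model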
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def accuracy(out , labels ) -> int:
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
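# e.g. accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) -> 2 (both argmaxes match)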
def load_rocstories_dataset(dataset_path ) -> list:
    with open(dataset_path , encoding='utf_8' ) as f:
        f_csv = csv.reader(f )
        output = []
        next(f_csv )  # skip the first line
        for line in tqdm(f_csv ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ) -> list:
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
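# Each tuple appended above holds tensors shaped:
#   input_ids / lm_labels: (n_batch, 2, input_len), mc_token_ids: (n_batch, 2), mc_labels: (n_batch,)
# i.e. both continuation candidates are packed along dim 1 for the double-heads model.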
def main() -> None:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
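# Typical invocation of this fine-tuning script (script and file names are placeholders):
#   python run_openai_gpt.py --do_train --do_eval --train_dataset train.csv \
#       --eval_dataset val.csv --output_dir ./out --train_batch_size 8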
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase__ = logging.get_logger(__name__)
@dataclass
class A__ :
lowercase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
lowercase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowercase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.task_name.lower()
class A__ ( __magic_name__ ):
lowercase = 'train'
lowercase = 'dev'
lowercase = 'test'
class A__ ( __magic_name__ ):
lowercase = 42
lowercase = 42
lowercase = 42
def __init__( self : Optional[Any] , a : GlueDataTrainingArguments , a : PreTrainedTokenizerBase , a : Optional[int] = None , a : Union[str, Split] = Split.train , a : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , a , )
lowerCAmelCase__ : int = args
lowerCAmelCase__ : int = glue_processors[args.task_name]()
lowerCAmelCase__ : List[Any] = glue_output_modes[args.task_name]
if isinstance(a , a ):
try:
lowerCAmelCase__ : Optional[Any] = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowerCAmelCase__ : str = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
lowerCAmelCase__ : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1] , label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase__ : Any = cached_features_file + '.lock'
with FileLock(a ):
if os.path.exists(a ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
lowerCAmelCase__ : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCAmelCase__ : Tuple = self.processor.get_test_examples(args.data_dir )
else:
lowerCAmelCase__ : str = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCAmelCase__ : Union[str, Any] = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , a : int ):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.label_list
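# Hypothetical usage of the dataset class above (here called `GlueDataset` for illustration):
#   args = GlueDataTrainingArguments(task_name='mrpc', data_dir='./glue/MRPC', max_seq_length=128)
#   train_ds = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
#   train_ds[0]  # an InputFeatures instance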
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
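# e.g. dep_version_check('tqdm') raises if the installed tqdm violates the range pinned
# in dependency_versions_table.py, and passes silently otherwise.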
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
    def __init__( self : Any , *args : Any , **kwargs : List[str] ):
        '''simple docstring'''
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
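# The wrapper above only emits a deprecation warning and forwards everything to
# GLPNImageProcessor; new code should instantiate GLPNImageProcessor directly.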
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
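# All tests above exercise the same denoising loop pattern:
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample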
import qiskit
def half_adder(bit0: int , bit1: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
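# Classical truth table the circuit reproduces; counts are keyed as '<AND bit><XOR bit>':
#   half_adder(0, 0) -> {'00': 1000}    half_adder(0, 1) -> {'01': 1000}
#   half_adder(1, 0) -> {'01': 1000}    half_adder(1, 1) -> {'10': 1000}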
if __name__ == "__main__":
lowerCamelCase__ = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase__ = HfArgumentParser(InitializationArguments)
lowerCamelCase__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase__ = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
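# Example invocation (script and repo names are illustrative; argument names come from
# InitializationArguments):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model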
def base16_encode(data: bytes ) -> str:
    return ''.join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
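# Round-trip examples for the two helpers above:
#   base16_encode(b'Hello World!')            -> '48656C6C6F20576F726C6421'
#   base16_decode('48656C6C6F20576F726C6421') -> b'Hello World!'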
if __name__ == "__main__":
import doctest
doctest.testmod()
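    # Round-trip sketch: the stdlib `base64` module implements the same
    # RFC 3548 Base16 alphabet as the two functions above (which share the
    # placeholder name `lowerCAmelCase__` in this dump), so it makes a handy
    # cross-check:
    import base64
    assert base64.b16encode(b"Hello World!") == b"48656C6C6F20576F726C6421"
    assert base64.b16decode(b"48656C6C6F20576F726C6421") == b"Hello World!"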
| 307
| 1
|
lowerCamelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Any = input('Enter message: ' )
lowerCAmelCase__ : Tuple = input('Enter key [alphanumeric]: ' )
lowerCAmelCase__ : Optional[int] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
lowerCAmelCase__ : Tuple = 'encrypt'
lowerCAmelCase__ : Any = encrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif mode.lower().startswith('d' ):
lowerCAmelCase__ : Any = 'decrypt'
lowerCAmelCase__ : List[str] = decrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'''\n{mode.title()}ed message:''' )
print(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
return translate_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encrypt' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
return translate_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decrypt' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = key.upper()
for symbol in message:
lowerCAmelCase__ : int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = 0
else:
translated.append(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
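    # Worked example (letter arithmetic only): with key "KEY", "HELLO"
    # encrypts to "RIJVS" -- H(7)+K(10)=17 -> 'R', E(4)+E(4)=8 -> 'I',
    # L(11)+Y(24)=35%26=9 -> 'J', L(11)+K(10)=21 -> 'V', O(14)+E(4)=18 -> 'S'.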
| 307
|
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[list[int]]:
lowerCAmelCase__ : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ )
return result
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE_ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_list.pop()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None:
for i in total_list:
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
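    # Cross-check sketch (assumption: the first function above is exposed as
    # `generate_all_combinations`, as the call above implies). The stdlib
    # produces the same k-subsets of 1..n in the same lexicographic order:
    from itertools import combinations
    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]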
| 307
| 1
|
import os
def lowerCAmelCase__ ( ) -> Optional[Any]:
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + '/grid.txt' ) as f:
lowerCAmelCase__ : Optional[int] = [] # noqa: E741
for _ in range(20 ):
l.append([int(SCREAMING_SNAKE_CASE_ ) for x in f.readline().split()] )
lowerCAmelCase__ : Optional[int] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCAmelCase__ : Optional[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCAmelCase__ : int = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCAmelCase__ : int = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCAmelCase__ : Optional[int] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCAmelCase__ : Any = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCAmelCase__ : str = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCAmelCase__ : Tuple = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCAmelCase__ : Any = temp
return maximum
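# Sliding-window sketch (hypothetical 1-D row, names local to this example)
# showing the same running-product scan the four loops above apply along rows,
# columns, and both diagonals:
def _row_best(row: list) -> int:
    return max(row[j] * row[j + 1] * row[j + 2] * row[j + 3] for j in range(len(row) - 3))
assert _row_best([1, 2, 3, 4, 5, 6]) == 3 * 4 * 5 * 6  # 360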
if __name__ == "__main__":
print(solution())
| 307
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A__ :
def __init__( self : Any , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Any = data
lowerCAmelCase__ : Node | None = None
class A__ :
def __init__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[Any] = None
def __iter__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.head
while self.head:
yield node.data
lowerCAmelCase__ : Any = node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ):
'''simple docstring'''
return "->".join(str(a ) for item in iter(self ) )
def _lowerCamelCase ( self : List[str] , a : Any ):
'''simple docstring'''
self.insert_nth(len(self ) , a )
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
self.insert_nth(0 , a )
def _lowerCamelCase ( self : str , a : int , a : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : int = Node(a )
if self.head is None:
lowerCAmelCase__ : Optional[int] = new_node # first node points itself
lowerCAmelCase__ : Union[str, Any] = new_node
elif index == 0: # insert at head
lowerCAmelCase__ : Dict = self.head
lowerCAmelCase__ : str = new_node
else:
lowerCAmelCase__ : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next
lowerCAmelCase__ : Optional[Any] = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase__ : Optional[Any] = new_node
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.delete_nth(0 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _lowerCamelCase ( self : Optional[Any] , a : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : str = self.head
if self.head == self.tail: # just one node
lowerCAmelCase__ : str = None
elif index == 0: # delete head node
lowerCAmelCase__ : int = self.tail.next.next
lowerCAmelCase__ : Tuple = self.head.next
else:
lowerCAmelCase__ : str = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[Any] = temp.next
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase__ : str = temp
return delete_node.data
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Any = CircularLinkedList()
assert len(SCREAMING_SNAKE_CASE_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(SCREAMING_SNAKE_CASE_ ) == i
circular_linked_list.insert_nth(SCREAMING_SNAKE_CASE_ , i + 1 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
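    # Design note (sketch; assumes the class above is exposed as
    # `CircularLinkedList`, as this test function implies): keeping a `tail`
    # pointer makes the circular invariant cheap to maintain, since
    # `tail.next` must always point back at `head`. `insert_nth` itself stays
    # O(n) because it re-walks the list to find the predecessor node.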
| 307
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 1
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = """▁"""
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = BigBirdTokenizer
lowercase = BigBirdTokenizerFast
lowercase = True
lowercase = True
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : List[str] = self.tokenizer_class(a , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = '<s>'
lowerCAmelCase__ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(a ) , 1_004 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : Optional[int] = self.get_tokenizer()
lowerCAmelCase__ : Tuple = self.get_rust_tokenizer()
lowerCAmelCase__ : List[str] = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ : str = tokenizer.tokenize(a )
lowerCAmelCase__ : Union[str, Any] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Any = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Tuple = self.get_rust_tokenizer()
lowerCAmelCase__ : Any = tokenizer.encode(a )
lowerCAmelCase__ : List[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = BigBirdTokenizer(a , keep_accents=a )
lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase__ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase__ : str = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = 'Hello World!'
lowerCAmelCase__ : Optional[int] = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
lowerCAmelCase__ : int = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase__ : Tuple = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase__ : Tuple = ' '.join(a )
lowerCAmelCase__ : Tuple = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a )
lowerCAmelCase__ : int = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a )
lowerCAmelCase__ : Any = BigBirdConfig(attention_type='original_full' )
lowerCAmelCase__ : Union[str, Any] = BigBirdModel(a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
lowerCAmelCase__ : List[Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
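        # fmt: off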
lowerCAmelCase__ : Optional[Any] = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 307
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase__ : Union[str, Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
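# Complexity sketch: stooge sort makes three recursive calls on 2/3-sized
# subarrays, giving T(n) = 3*T(2n/3) + O(1), i.e. Theta(n^(log 3 / log 1.5)):
import math
print(f"stooge sort exponent ~= {math.log(3) / math.log(1.5):.2f}")  # ~2.71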
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 307
| 1
|
import argparse
import json
import subprocess
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[Any] = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowerCAmelCase__ : List[str] = subprocess.run(SCREAMING_SNAKE_CASE_ , shell=SCREAMING_SNAKE_CASE_ , stdout=subprocess.PIPE )
lowerCAmelCase__ : Any = output.stdout.decode('utf-8' )
lowerCAmelCase__ : Tuple = json.loads(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(SCREAMING_SNAKE_CASE_ )
    # save the results so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCAmelCase__ : Optional[int] = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return values.split(',' )
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowerCamelCase__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
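    # Invocation sketch (script name and token are placeholders, not real values):
    # python check_offline_runners.py --target_runners runner-a,runner-b --token <gh-token>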
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
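# Behaviour note (sketch): with `_LazyModule`, importing this package is cheap;
# each submodule listed in `_import_structure` is only loaded on first
# attribute access, e.g. `from transformers import Speech2TextProcessor`
# triggers the real import of `processing_speech_to_text` at that point.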
| 307
| 1
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = GPTSwaTokenizer
lowercase = False
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : List[str] = GPTSwaTokenizer(a , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : int , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'This is a test'
lowerCAmelCase__ : List[str] = 'This is a test'
return input_text, output_text
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = '<s>'
lowerCAmelCase__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(a ) , 2_000 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = GPTSwaTokenizer(a )
lowerCAmelCase__ : List[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [465, 287, 265, 631, 842] )
lowerCAmelCase__ : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase__ : Dict = tokenizer.convert_ids_to_tokens(a )
# fmt: off
self.assertListEqual(
a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = GPTSwaTokenizer(a )
lowerCAmelCase__ : int = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase__ : Optional[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a , a ):
self.assertListEqual(tokenizer.encode_fast(a ) , a )
# Test that decode_fast returns the input text
for text, token_ids in zip(a , a ):
self.assertEqual(tokenizer.decode_fast(a ) , a )
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase__ : Dict = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='AI-Sweden/gpt-sw3-126m' , sequences=a , )
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
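# Behaviour note (sketch): `transpose`, `reshape`, `squeeze` and `expand_dims`
# from transformers.utils are framework-dispatching wrappers -- they inspect
# the input type (numpy / torch / tf / jax) and call the matching backend op,
# which is exactly what the paired assertions above verify.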
| 307
| 1
|
from math import pi
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
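    # Worked check (assumption: the function above is exposed as `arc_length`,
    # as the call above implies): 2*pi*r*(angle/360) with r=10, angle=90 gives
    # a quarter circumference, 5*pi ~= 15.708.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9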
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
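# A minimal usage sketch of the public API re-exported above (the dataset name
# is illustrative, and loading it needs network access to the Hugging Face Hub):
# from datasets import load_dataset
# ds = load_dataset('squad', split='train')   # Arrow-backed Dataset
# print(ds.num_rows, ds.column_names)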
import random


def _partition(data: list, pivot) -> tuple:
    # Split ``data`` into the values below, equal to, and above the pivot.
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
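# Usage sketch: quick_select returns the value that would sit at `index` in the
# sorted list (expected O(n) time), so the median of an odd-length list is at
# len(items) // 2. The results are deterministic even though the pivot is random.
items = [7, 1, 5, 3, 9]
assert quick_select(items, 0) == 1                 # minimum
assert quick_select(items, len(items) // 2) == 5   # median
assert quick_select(items, 99) is None             # out-of-range index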
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
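# A minimal inference sketch mirroring the slow tests above (it downloads the
# harmonai/maestro-150k weights and realistically needs a CUDA device):
# pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
# audio = pipe(generator=torch.manual_seed(0), num_inference_steps=100,
#              audio_length_in_s=4.096).audios   # shape (batch, channels, samples)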
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class A__ :
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, 'total_steps'):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
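# Usage sketch (the wrapper is named A__ in this dump; upstream it is
# accelerate's AcceleratedScheduler -- an assumption worth flagging). It keeps
# a torch LR scheduler in sync with an optimizer whose steps may be skipped,
# e.g. under mixed-precision gradient scaling:
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)          # `model` assumed
# lr_sched = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# wrapped = A__(lr_sched, optimizer, step_with_optimizer=True, split_batches=False)
# wrapped.step()   # advances num_processes times, or once if split_batches=True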
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
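# Why these stubs exist: importing the real ONNX pipelines without `torch`,
# `transformers` and `onnx` installed would crash at import time, so dummy
# classes stand in and fail lazily. A stripped, hedged sketch of the pattern
# (the real `requires_backends` checks importlib for each backend first):
class _DummyExample:
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        # Here we always raise, which is the behaviour when none is installed.
        raise ImportError(f'This class requires the backends: {self._backends}')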
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'roberta-prelayernorm'
    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( __magic_name__ ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
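# What the dynamic axes above mean: when exporting to ONNX, axes listed here
# (batch/sequence) stay symbolic so the exported graph accepts any batch size
# and sequence length. A hedged sketch with torch.onnx (`model`, `input_ids`
# and `attention_mask` are assumed to exist):
# torch.onnx.export(model, (input_ids, attention_mask), 'model.onnx',
#                   input_names=['input_ids', 'attention_mask'],
#                   dynamic_axes={'input_ids': {0: 'batch', 1: 'sequence'},
#                                 'attention_mask': {0: 'batch', 1: 'sequence'}})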
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!')
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!')
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities somehow, we will update!')
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same; once we fix the instabilities somehow, we will update! It is also going to be a `too_slow` test')
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated')
@slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
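# A minimal generation sketch matching the gated-model test above (requires
# accepted access to the meta-llama checkpoints; identifiers taken from the tests):
# tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
# model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
# input_ids = tokenizer.encode('Simply put, the theory of relativity states that ', return_tensors='pt')
# output = model.generate(input_ids, max_new_tokens=64, do_sample=False)
# print(tokenizer.decode(output[0], skip_special_tokens=True))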
import os
from math import log10


def solution(data_file: str = 'base_exp.txt') -> int:
    # Project Euler 99: find the line whose base**exponent is largest, compared
    # via exponent * log10(base) to avoid materialising million-digit integers.
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
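# Why logarithms work: log10 is monotonic, so a1**x1 > a2**x2 exactly when
# x1 * log10(a1) > x2 * log10(a2). Tiny illustration with the warm-up pair
# from the problem statement: 3**7 = 2187 beats 2**11 = 2048.
assert 7 * log10(3) > 11 * log10(2)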
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We verify the converted model on an image of cute cats.
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main', model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
lowerCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
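# Example invocation (the script file name is an assumption; the model names
# follow the facebookresearch/dino torch hub entries handled above):
# python convert_vit_dino_to_pytorch.py --model_name dino_vitb16 \
#     --pytorch_dump_folder_path ./dino_vitb16 --base_model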
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
from __future__ import annotations


class Node:
    def __init__(self, data: int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:  # Every node has zero or two children.
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)


if __name__ == "__main__":
    main()
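# Note on the predicate: a binary tree is "full" when every node has zero or
# two children. The smallest counterexample has exactly one child:
lonely = Node(1)
lonely.left = Node(2)
assert not is_full_binary_tree(lonely)
assert depth_of_tree(lonely) == 2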
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
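# Side note (minimal standalone sketch, not part of the report script): the custom
# TableFormat defined above disables every horizontal rule, so tabulate emits bare
# pipe-delimited rows that render cleanly inside Slack code blocks.
from tabulate import DataRow, TableFormat, tabulate

slack_fmt = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
print(tabulate([["tests/test_x.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=slack_fmt))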
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , a : Optional[int] , a : int=2 , a : Union[str, Any]=56 , a : List[Any]=True , a : Dict=True , a : List[Any]=True , a : Optional[int]=True , a : Tuple=99 , a : Any=32 , a : Any=2 , a : Optional[int]=2 , a : List[Any]=7 , a : Optional[int]="gelu_new" , a : Any=0.1 , a : Tuple=0.1 , a : str=512 , a : str=16 , a : Optional[Any]=2 , a : List[str]=0.0_2 , a : Tuple=4 , a : Optional[int]="block_sparse" , a : List[str]=True , a : str=False , a : Any=2 , a : Optional[int]=3 , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : Tuple = use_attention_mask
lowerCAmelCase__ : str = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : List[Any] = num_choices
lowerCAmelCase__ : Dict = rescale_embeddings
lowerCAmelCase__ : Tuple = attention_type
lowerCAmelCase__ : Any = use_bias
lowerCAmelCase__ : Tuple = block_size
lowerCAmelCase__ : Dict = num_random_blocks
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = config_and_inputs
lowerCAmelCase__ : Dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : Dict = self._prepare_for_class(a , a )
lowerCAmelCase__ : Optional[int] = model_class(a )
@jax.jit
def model_jitted(a : List[str] , a : Optional[Any]=None , **a : int ):
return model(input_ids=a , attention_mask=a , **a )
with self.subTest('JIT Enabled' ):
lowerCAmelCase__ : Optional[int] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase__ : str = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[Any] , a : Union[str, Any] , a : Optional[int]=1E-5 , a : List[Any]="outputs" , a : str=None ):
'''simple docstring'''
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(a , a , a , a , a , a )
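# Minimal illustration (generic JAX, not tied to BigBird) of the JIT round-trip the
# test above performs: the compiled and eager calls must agree in shape.
import jax
import jax.numpy as jnp

@jax.jit
def _tiny_model(x):
    return jnp.tanh(x) * 2.0

_x = jnp.ones((2, 3))
with jax.disable_jit():
    _eager = _tiny_model(_x)
assert _tiny_model(_x).shape == _eager.shape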
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
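# Hedged sketch (helper name hypothetical, not from the package): the value a local
# binary pattern assigns one pixel -- threshold the 8 neighbours against the centre
# and pack the results into a byte.
def _lbp_of_pixel(center: int, neighbours: list) -> int:
    return sum((n >= center) << i for i, n in enumerate(neighbours))

assert _lbp_of_pixel(5, [6, 4, 4, 4, 4, 4, 4, 6]) == 0b10000001  # first and last neighbour set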
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
lowerCamelCase__ = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = BertTokenizer
def __init__( self : List[str] , a : Optional[Any]=None , a : Any=None , a : str=True , a : Dict="[UNK]" , a : List[str]="[SEP]" , a : Optional[Any]="[PAD]" , a : List[str]="[CLS]" , a : int="[MASK]" , a : str=True , a : Optional[int]=None , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowerCAmelCase__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowerCAmelCase__ : Any = getattr(a , normalizer_state.pop('type' ) )
lowerCAmelCase__ : Tuple = do_lower_case
lowerCAmelCase__ : Any = strip_accents
lowerCAmelCase__ : Any = tokenize_chinese_chars
lowerCAmelCase__ : Union[str, Any] = normalizer_class(**a )
lowerCAmelCase__ : str = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
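# Worked example (made-up ids) of the segment-id rule implemented above: every
# position in "[CLS] A [SEP]" gets 0, every position in "B [SEP]" gets 1.
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7, 8], [9]
assert len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]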
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
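# Rough sketch (class and names hypothetical) of what the _LazyModule wiring above
# buys: a submodule is imported only when one of its attributes is first accessed.
import importlib

class _LazyNamespace:
    def __init__(self, package: str, structure: dict):
        self._package, self._structure = package, structure

    def __getattr__(self, attr: str):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(f"{self._package}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)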
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv2ImageProcessor'
lowercase = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , a : Optional[int]=None , a : List[str]=None , **a : Tuple ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowerCAmelCase__ : str = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self : str , a : Dict , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
lowerCAmelCase__ : Optional[Any] = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ : List[str] = features['words']
lowerCAmelCase__ : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
lowerCAmelCase__ : Optional[Any] = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase__ : Union[str, Any] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase__ : Union[str, Any] = images
return encoded_inputs
def _lowerCamelCase ( self : str , a : str , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(a )} and {len(a )}''' )
return images_with_overflow
def _lowerCamelCase ( self : Union[str, Any] , *a : Tuple , **a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : List[str] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor
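# Tiny illustration (toy values) of get_overflowing_images above: when the tokenizer
# splits one page into several chunks, the page image is repeated once per chunk.
_images = ["page0", "page1"]
_overflow_to_sample_mapping = [0, 0, 1]  # page 0 produced two chunks
assert [_images[i] for i in _overflow_to_sample_mapping] == ["page0", "page0", "page1"]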
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
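# Numeric sketch of _convert_to_karras above (values made up): interpolate linearly
# in sigma**(1/rho) space with rho = 7, which densifies the schedule near sigma_min.
import numpy as np

_sigma_min, _sigma_max, _rho = 0.1, 10.0, 7.0
_ramp = np.linspace(0, 1, 5)
_sigmas = (_sigma_max ** (1 / _rho) + _ramp * (_sigma_min ** (1 / _rho) - _sigma_max ** (1 / _rho))) ** _rho
assert np.isclose(_sigmas[0], _sigma_max) and np.isclose(_sigmas[-1], _sigma_min)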
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
lowercase = ['input_features']
def __init__( self : int , a : Dict=80 , a : Dict=16_000 , a : Any=160 , a : str=30 , a : List[str]=400 , a : Any=0.0 , a : List[str]=False , **a : Optional[int] , ):
'''simple docstring'''
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , )
lowerCAmelCase__ : List[str] = n_fft
lowerCAmelCase__ : Union[str, Any] = hop_length
lowerCAmelCase__ : Any = chunk_length
lowerCAmelCase__ : Optional[Any] = chunk_length * sampling_rate
lowerCAmelCase__ : List[Any] = self.n_samples // hop_length
lowerCAmelCase__ : str = sampling_rate
lowerCAmelCase__ : Dict = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , )
def _lowerCamelCase ( self : Optional[Any] , a : np.array ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = spectrogram(
a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
lowerCAmelCase__ : List[str] = log_spec[:, :-1]
lowerCAmelCase__ : str = np.maximum(a , log_spec.max() - 8.0 )
lowerCAmelCase__ : List[str] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowerCamelCase ( a : List[np.ndarray] , a : List[np.ndarray] , a : float = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase__ : Any = np.array(a , np.intaa )
lowerCAmelCase__ : Union[str, Any] = []
for vector, length in zip(a , attention_mask.sum(-1 ) ):
lowerCAmelCase__ : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase__ : Dict = padding_value
normed_input_values.append(a )
else:
lowerCAmelCase__ : str = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Tuple , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : bool = True , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = None , a : Optional[str] = "max_length" , a : Optional[int] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : Optional[Any] , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase__ : List[Any] = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase__ : Dict = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowerCAmelCase__ : Union[str, Any] = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : List[Any] = [np.asarray([raw_speech] ).T]
lowerCAmelCase__ : int = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
lowerCAmelCase__ : List[str] = self.pad(
a , padding=a , max_length=max_length if max_length else self.n_samples , truncation=a , pad_to_multiple_of=a , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase__ : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
lowerCAmelCase__ : Union[str, Any] = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
lowerCAmelCase__ : Union[str, Any] = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
lowerCAmelCase__ : str = [self._np_extract_fbank_features(a ) for waveform in input_features[0]]
if isinstance(input_features[0] , a ):
lowerCAmelCase__ : str = [np.asarray(a , dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase__ : Optional[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase__ : int = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase__ : Optional[int] = padded_inputs.convert_to_tensors(a )
return padded_inputs
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
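# Worked example (toy numbers) of the dynamic-range compression applied in
# _np_extract_fbank_features above: clamp to within 8 of the max, then rescale.
import numpy as np

_log_spec = np.array([-12.0, -3.0, 0.0])
_log_spec = np.maximum(_log_spec, _log_spec.max() - 8.0)  # -> [-8.0, -3.0, 0.0]
_log_spec = (_log_spec + 4.0) / 4.0                       # -> [-1.0, 0.25, 1.0]
assert np.allclose(_log_spec, [-1.0, 0.25, 1.0])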
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
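    # Quick arithmetic check of the sequence-length formula used above (assuming a
    # 224x224 image with 16x16 patches): (224 // 16) * (224 // 16) + 1 = 196 + 1 = 197,
    # i.e. the patch count plus one [CLS] token.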
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1])
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
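# A minimal standalone sketch (not from the test suite above; the 224/16
# numbers are assumed for illustration) of the patch arithmetic behind the
# attention-shape assertions: a ViT-style encoder turns an H x W image into
# (H // P) * (W // P) patches plus one [CLS] token, and DeiT adds a second
# (distillation) token.
def vit_sequence_length(image_size: int, patch_size: int, num_extra_tokens: int = 1) -> int:
    # non-overlapping patches along each axis, multiplied together
    num_patches = (image_size // patch_size) * (image_size // patch_size)
    # add the [CLS] token (and, for DeiT, the distillation token)
    return num_patches + num_extra_tokens


assert vit_sequence_length(224, 16) == 197  # ViT: 14 * 14 patches + [CLS]
assert vit_sequence_length(224, 16, num_extra_tokens=2) == 198  # DeiT variant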
| 307
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
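# A minimal standalone sketch of the query-padding step performed in __call__
# above (illustrative only -- running the full processor needs a real
# checkpoint, so this just re-implements the padding rule on plain lists):
def pad_text_queries(text):
    # every image must contribute the same number of query rows to the batch
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]


assert pad_text_queries([["a cat", "a dog"], ["a bird"]]) == [
    ["a cat", "a dog"],
    ["a bird", " "],
]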
| 307
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
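# A worked example of the accuracy() helper above (values made up for
# illustration): np.argmax turns per-choice logits into predicted indices and
# the sum counts exact matches against the labels, e.g.
#
#     accuracy(np.array([[0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]), np.array([1, 0, 0]))
#
# returns 2, because the first two predictions (1 and 0) match the labels and
# the third (1 vs. label 0) does not.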
| 307
| 1
|
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
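# A compact, self-checking trace of FixedPriorityQueue semantics (values chosen
# to mirror the demo above): dequeue always drains priority 0 in FIFO order
# before touching priority 1, and so on.
def _fixed_priority_trace():
    q = FixedPriorityQueue()
    q.enqueue(1, 70)  # priority 1
    q.enqueue(0, 10)  # priority 0 -- dequeued first despite arriving later
    q.enqueue(2, 5)  # priority 2
    q.enqueue(0, 100)  # priority 0, behind 10 (FIFO within a priority)
    return [q.dequeue() for _ in range(4)]


assert _fixed_priority_trace() == [10, 100, 70, 5]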
| 307
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
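# A usage sketch for dep_version_check (hypothetical pins -- the real strings
# live in dependency_versions_table.py, so treat the examples as illustrative):
#
#     dep_version_check("tqdm")  # checks installed tqdm against e.g. "tqdm>=4.27"
#     dep_version_check("tokenizers", "pip install tokenizers")  # hint shown on failure
#
# Both calls delegate to require_version, which raises if the installed
# version does not satisfy the pinned requirement.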
| 307
| 1
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
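# Because the loop above halves the bracket every pass, reaching the 1e-7
# stopping width from an initial bracket of length (b - a) takes about
# log2((b - a) / 1e-7) iterations. A quick self-contained check of that bound
# (the 1..1000 bracket mirrors the demo above):
import math


def bisection_iterations(a: float, b: float, tol: float = 1e-7) -> int:
    # smallest k with (b - a) / 2**k <= tol
    return math.ceil(math.log2((b - a) / tol))


assert bisection_iterations(1, 1000) == 34  # ~34 halvings of a width-999 bracket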
| 307
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
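# For orientation, a standalone sketch of how the scheduler config exercised
# above maps betas to sigmas (the same continuous-noise-scale construction the
# k-diffusion-style schedulers use; the constants mirror get_scheduler_config,
# everything else is illustrative):
import torch

betas = torch.linspace(0.0001, 0.02, 1100)  # the "linear" beta schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
print(float(sigmas[0]), float(sigmas[-1]))  # tiny noise at t=0, large at t=T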
| 307
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 307
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 307
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
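# As the "New Code" comment inside training_function notes, the manual
# last-batch truncation can be collapsed into one call. A hedged sketch of that
# variant (same eval loop, leaning on Accelerator.gather_for_metrics to drop
# the duplicated samples automatically; available in recent accelerate
# releases):
#
#     for step, batch in enumerate(eval_dataloader):
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         predictions, references = accelerator.gather_for_metrics(
#             (predictions, batch["labels"])
#         )
#         metric.add_batch(predictions=predictions, references=references)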
| 307
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
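# A round-trip check for the helpers above, plus a cross-check against the
# standard library: RFC 3548 base16 is exactly what base64.b16encode/b16decode
# implement, so the two must agree.
import base64

payload = b"Hello World!"
encoded = base16_encode(payload)
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == payload
assert base64.b16decode(encoded) == payload  # stdlib agrees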
| 307
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'):
    if alpha_transform_type == 'cosine':

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == 'exp':

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
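

# A quick, self-contained sanity check for `betas_for_alpha_bar` (illustrative
# only; it is not used by the scheduler below): the schedule has one entry per
# timestep and every beta is clipped at `max_beta`.
def _betas_for_alpha_bar_demo(num_steps: int = 10) -> None:
    betas = betas_for_alpha_bar(num_steps)
    assert betas.shape == (num_steps,)
    assert float(betas.max()) <= 0.999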
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = 'linear',
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = 'epsilon',
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = 'linspace',
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == 'linear':
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == 'scaled_linear':
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == 'squaredcos_cap_v2':
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine')
        elif beta_schedule == 'exp':
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp')
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ['linspace', 'trailing']:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        # Scales the denoising model input by 1 / sqrt(sigma**2 + 1).
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == 'linspace':
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == 'leading':
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == 'trailing':
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith('mps'):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        # Constructs the noise schedule of Karras et al. (2022).
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
@property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == 'epsilon':
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == 'v_prediction':
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == 'sample':
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`'
            )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
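

# Usage sketch for `HeunDiscreteScheduler` (illustrative only; the dummy model
# below just predicts zeros in place of a real UNet forward pass):
def _heun_scheduler_demo() -> torch.Tensor:
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample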
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
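

# Cross-check sketch: the backtracking above should agree with
# `itertools.combinations`, which also yields k-combinations of 1..n in
# lexicographic order.
def _check_against_itertools(n: int = 4, k: int = 2) -> None:
    from itertools import combinations

    expected = [list(c) for c in combinations(range(1, n + 1), k)]
    assert generate_all_combinations(n, k) == expected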
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
# print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)
    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
# mapping function to write output
            def write_to_file(batch, i):
p.write(F'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(F'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first few examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
# load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)
# map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
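

# Quick demo of the target-side normalization above (illustrative only):
def _normalize_text_demo() -> None:
    assert normalize_text('Hello,\nWORLD!') == 'hello world'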
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
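

# A compact round-trip sketch of the local save/load behavior the tests above
# exercise (no Hub access; illustrative only):
def _generation_config_roundtrip_demo() -> None:
    config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.do_sample is True
    assert loaded.temperature == 0.7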
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set='train'):
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == 'train':
        return train_data[example_no][1]
    elif data_set == 'test':
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == 'train':
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == 'test':
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
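

# An equivalent vectorized sketch of one batch gradient-descent update with
# numpy (illustrative only; names are local to the sketch). With a design
# matrix that has a prepended bias column, the update is
# theta <- theta - lr * X^T (X theta - y) / m.
def _vectorized_update_sketch(theta, features, targets, lr=LEARNING_RATE):
    x = numpy.asarray(features, dtype=float)
    y = numpy.asarray(targets, dtype=float)
    th = numpy.asarray(theta, dtype=float)
    x_b = numpy.hstack([numpy.ones((x.shape[0], 1)), x])  # bias column first
    grad = x_b.T @ (x_b @ th - y) / x.shape[0]
    return th - lr * grad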
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 32
@property
    def time_input_dim(self):
'''simple docstring'''
return 32
@property
    def block_out_channels_a(self):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim(self):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
'''simple docstring'''
return 100
@property
    def dummy_tokenizer(self):
'''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_image_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
        return CLIPVisionModelWithProjection(config)
@property
    def dummy_text_proj(self):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
@property
    def dummy_decoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
            'sample_size': 32,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double the in channels because the model predicts both mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_super_res_kwargs(self):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
@property
    def dummy_super_res_last(self):
'''simple docstring'''
torch.manual_seed(1 )
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='learned_range', prediction_type='epsilon', num_train_timesteps=1_000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='epsilon', num_train_timesteps=1_000, )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
        img_out_1 = pipe(
            **a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
        img_out_2 = pipe(
            **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
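

# Inference sketch mirroring the slow test above (illustrative only; it loads
# the same public checkpoint and image, and returns one variation as a numpy
# array):
def _unclip_image_variation_sketch():
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png'
    )
    pipe = UnCLIPImageVariationPipeline.from_pretrained('kakaobrain/karlo-v1-alpha-image-variations')
    return pipe(init_image, output_type='np').images[0]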
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
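

# Derived-size sketch: `hidden_size` above is the channel width after the last
# stage, i.e. embed_dim * 2 ** (num_stages - 1). For the defaults
# (embed_dim=96, four stages) that is 96 * 2**3 = 768.
def _last_stage_width(embed_dim: int = 96, num_stages: int = 4) -> int:
    return int(embed_dim * 2 ** (num_stages - 1))


assert _last_stage_width() == 768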
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
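

# Sanity-check sketch: stooge sort is correct but runs in roughly O(n^2.71)
# time (n^(log 3 / log 1.5)), so it is of pedagogical interest only.
def _stooge_demo() -> None:
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
    assert stooge_sort([]) == []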
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
        """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSpeech2TextForConditionalGeneration""",
        """TFSpeech2TextModel""",
        """TFSpeech2TextPreTrainedModel""",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
        """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Speech2TextForConditionalGeneration""",
        """Speech2TextModel""",
        """Speech2TextPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
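

# A minimal sketch of the lazy-import idea used above: attribute access is
# resolved to the owning submodule listed in `_import_structure` on demand, so
# optional backends (torch, tf, sentencepiece, ...) are only imported when one
# of their symbols is actually requested.
def _lazy_lookup(name: str):
    import importlib

    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module('.' + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(name)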
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
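

# Quick sketch of the derived defaults above: when `attention_hidden_size` or
# `intermediate_size` is left as None, it falls back to a value computed from
# `hidden_size`.
def _rwkv_config_defaults_demo() -> None:
    config = RwkvConfig()
    assert config.attention_hidden_size == config.hidden_size
    assert config.intermediate_size == 4 * config.hidden_size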
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
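# The assertions above all exercise one idea: helpers like reshape/squeeze/expand_dims
# dispatch on the input tensor's framework so callers get a single entry point.
# A minimal illustrative sketch of that dispatch pattern (an assumption for
# illustration, not the actual transformers.utils implementation):
import numpy as np

def agnostic_reshape(tensor, shape):
    module = type(tensor).__module__
    if module.startswith("torch"):
        return tensor.reshape(shape)          # torch.Tensor accepts a shape tuple
    if module.startswith("tensorflow"):
        import tensorflow as tf
        return tf.reshape(tensor, shape)      # tf needs an explicit function call
    return np.reshape(tensor, shape)          # numpy (and jax arrays, via duck typing)

print(agnostic_reshape(np.random.randn(3, 4), (4, 3)).shape)  # -> (4, 3)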
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
lowerCamelCase__ = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
lowerCamelCase__ = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
lowerCamelCase__ = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one per prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _download_and_prepare(self , dl_manager ):
    '''simple docstring'''
    import nltk
    nltk.download('wordnet' )
    if NLTK_VERSION >= version.Version('3.6.5' ):
        nltk.download('punkt' )
    if NLTK_VERSION >= version.Version('3.6.6' ):
        nltk.download('omw-1.4' )
def _compute(self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
    '''simple docstring'''
    if NLTK_VERSION >= version.Version('3.6.5' ):
        scores = [
            meteor_score.single_meteor_score(
                word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
            for ref, pred in zip(references , predictions )
        ]
    else:
        scores = [
            meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
            for ref, pred in zip(references , predictions )
        ]
    return {"meteor": np.mean(scores )}
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
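# With the public surface assembled above, consumer code only touches the
# top-level names. A minimal usage sketch (requires network access; the dataset
# name is just an example from the Hub):
# from datasets import load_dataset, concatenate_datasets
# train = load_dataset("imdb", split="train[:1%]")
# test = load_dataset("imdb", split="test[:1%]")
# combined = concatenate_datasets([train, test])
# print(len(combined))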
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_opt"""] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_opt"""] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
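# _LazyModule defers importing the heavy torch/tf/flax code until an attribute
# is first accessed. A minimal sketch of the same idea using PEP 562 module
# __getattr__ (illustrative only; the real _LazyModule also handles submodules
# and __dir__):
# import importlib
#
# _LAZY = {"OPTModel": ".modeling_opt", "TFOPTModel": ".modeling_tf_opt"}
#
# def __getattr__(name):
#     if name in _LAZY:
#         module = importlib.import_module(_LAZY[name], __package__)
#         return getattr(module, name)       # import happens here, on demand
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")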
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
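# A minimal end-to-end inference sketch mirroring the slow tests above (a sketch
# only, assuming a CUDA device and network access to download the checkpoint):
# from diffusers import DanceDiffusionPipeline
# import torch
# pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
# output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
# audio = output.audios[0]  # numpy array of shape (channels, samples)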
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
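# The classes above are import-time stand-ins: the DummyObject metaclass,
# together with requires_backends, raises a helpful error the moment a
# placeholder is touched. A self-contained sketch of that pattern (illustrative,
# not the actual diffusers implementation; names here are hypothetical):
class _RequiresBackend(type):
    def __call__(cls, *args, **kwargs):
        # Instantiation fails immediately with a message naming the missing backends.
        raise ImportError(
            f"{cls.__name__} requires the backends {cls._backends}, which are not installed."
        )

class FakeOnnxPipeline(metaclass=_RequiresBackend):
    _backends = ["torch", "transformers", "onnx"]

try:
    FakeOnnxPipeline()
except ImportError as err:
    print(err)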
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , a : Tuple , a : Union[str, Any] , a : str="<|endoftext|>" , a : Optional[Any]="<|endoftext|>" , a : Dict="<|startoftext|>" , a : int="<|endoftext|>" , a : List[str]=False , **a : List[str] , ):
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a ):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(a ):
raise ValueError(
f'''Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
self.do_clean_text = do_clean_text
self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(a , a )
self.subword_tokenizer = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return len(self.raw_vocab )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _lowerCamelCase ( self : Optional[Any] , a : Any ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text )
def _lowerCamelCase ( self : Tuple , a : Any ):
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token ) )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a )
def _lowerCamelCase ( self : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = ''.join(a ).strip()
return out_string
def _lowerCamelCase ( self : List[str] , a : "Conversation" ):
'''simple docstring'''
lowerCAmelCase__ : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] )
if len(a ) > self.model_max_length:
lowerCAmelCase__ : str = input_ids[-self.model_max_length :]
return input_ids
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = 0
if os.path.isdir(a ):
lowerCAmelCase__ : List[str] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : List[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCAmelCase__ : Any = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCAmelCase__ : List[str] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(a , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase__ : List[str] = token_index
writer.write(','.join(a ) + '\n' )
index += 1
with open(a , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , a )
return vocab_file, emoji_file
class A__ ( __magic_name__ ):
def __init__( self : Tuple , a : str , a : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = vocab # same as swe
lowerCAmelCase__ : Optional[int] = ids_to_tokens # same as bpe
lowerCAmelCase__ : str = emoji
lowerCAmelCase__ : Any = np.max([len(a ) for w in self.vocab.keys()] )
lowerCAmelCase__ : Union[str, Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCAmelCase__ : int = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCAmelCase__ : Any = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCAmelCase__ : Dict = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase__ : Union[str, Any] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase__ : Any = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCAmelCase__ : Optional[int] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCAmelCase__ : Tuple = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCAmelCase__ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.ids_to_tokens )
def _lowerCamelCase ( self : Any , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.content_repattera.sub('<URL>' , a )
lowerCAmelCase__ : Optional[int] = self.content_repattera.sub('<EMAIL>' , a )
lowerCAmelCase__ : Tuple = self.content_repattera.sub('<TEL>' , a )
lowerCAmelCase__ : Union[str, Any] = self.content_repattera.sub('<DATE>' , a )
lowerCAmelCase__ : Union[str, Any] = self.content_repattera.sub('<DATE>' , a )
lowerCAmelCase__ : str = self.content_repattera.sub('<PRICE>' , a )
lowerCAmelCase__ : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase__ : str = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple=False ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = text.replace(' ' , '<SP>' )
lowerCAmelCase__ : Union[str, Any] = text.replace(' ' , '<SP>' )
lowerCAmelCase__ : Union[str, Any] = text.replace('\r\n' , '<BR>' )
lowerCAmelCase__ : Union[str, Any] = text.replace('\n' , '<BR>' )
lowerCAmelCase__ : Optional[int] = text.replace('\r' , '<BR>' )
lowerCAmelCase__ : Dict = text.replace('\t' , '<TAB>' )
lowerCAmelCase__ : str = text.replace('—' , 'ー' )
lowerCAmelCase__ : Optional[int] = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase__ : List[Any] = text.replace(a , a )
if clean:
lowerCAmelCase__ : List[str] = self.clean_text(a )
def check_simbol(a : Dict ):
e = a.encode()
if len(a ) == 1 and len(e ) == 2:
    c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc_2a1 and c <= 0Xc_2bf)
or (c >= 0Xc_780 and c <= 0Xc_783)
or (c >= 0Xc_ab9 and c <= 0Xc_bbf)
or (c >= 0Xc_c80 and c <= 0Xc_da2)
):
return True
return False
def checkuae(a : Optional[int] ):
e = a.encode()
if len(a ) == 1 and len(e ) == 3:
    c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe28_080 and c <= 0Xe2b_07f:
return True
return False
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Optional[int] = []
while pos < len(a ):
lowerCAmelCase__ : Optional[Any] = min(len(a ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
lowerCAmelCase__ : str = [] # (token_id, token, pos)
for e in range(a , a , -1 ):
lowerCAmelCase__ : Union[str, Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a ) > 2:
lowerCAmelCase__ : List[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(a ) > 0:
# the smallest token_id is adopted
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = sorted(a , key=lambda x : x[0] )[0]
result.append(a )
lowerCAmelCase__ : Union[str, Any] = e
else:
lowerCAmelCase__ : Dict = pos + 1
lowerCAmelCase__ : List[Any] = text[pos:end]
if check_simbol(a ):
result.append('<KIGOU>' )
elif checkuae(a ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
lowerCAmelCase__ : Union[str, Any] = end
return result
def _lowerCamelCase ( self : int , a : List[Any] , a : Tuple="\n" ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase__ : Any = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(a )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(a )
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase__ : Optional[Any] = ''.join(a )
return text
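# The tokenize loop above is a windowed scan: at each position it collects every
# in-vocab window up to maxlen and keeps the candidate with the smallest token
# id ("the smallest token_id is adopted"). A compact sketch of that scan over a
# toy vocab (illustrative only, not the actual tokenizer):
def smallest_id_tokenize(text, vocab, maxlen=4):
    pos, out = 0, []
    while pos < len(text):
        candidates = []                       # (token_id, piece, end) per window hit
        for end in range(min(len(text), pos + maxlen), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                candidates.append((vocab[piece], piece, end))
        if candidates:
            _, piece, end = min(candidates)   # smallest token id wins
            out.append(piece)
            pos = end
        else:
            out.append(text[pos])             # fallback: emit the raw character
            pos += 1
    return out

print(smallest_id_tokenize("abcd", {"ab": 0, "abc": 1, "d": 2}))  # ['ab', 'c', 'd']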
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
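# The rope_scaling test above relies on how scaling alters positions: linear
# scaling divides every position index by the factor (so even short inputs
# change), while dynamic NTK scaling only kicks in past the original maximum
# length. A sketch of the linear variant (assumed formula, for illustration):
import torch as _torch

def rope_angles(seq_len, dim, base=10000.0, linear_factor=1.0):
    inv_freq = 1.0 / (base ** (_torch.arange(0, dim, 2).float() / dim))
    positions = _torch.arange(seq_len).float() / linear_factor  # linear scaling
    return _torch.outer(positions, inv_freq)                    # (seq_len, dim/2)

print(rope_angles(10, 8, linear_factor=10.0).shape)  # torch.Size([10, 4])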
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type ,
    generator_name_or_path ,
    question_encoder_name_or_path ,
    dest_dir ,
    config_name_or_path = None ,
    generator_tokenizer_name_or_path = None ,
    question_encoder_tokenizer_name_or_path = None ,
):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
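# Equivalent programmatic use of the consolidation entry point (a sketch; the
# model identifiers and output path below are hypothetical placeholders):
# consolidate(
#     "rag_token",
#     "facebook/bart-large",                            # generator
#     "facebook/dpr-question_encoder-single-nq-base",   # question encoder
#     Path("./rag-token-consolidated"),
# )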
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
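# Hedged illustration (not from the original file): with mask_time_prob=0.05
# and mask_time_length=10, a 1000-frame sequence gets roughly
# 0.05 * 1000 / 10 = 5 masked spans of 10 frames each; mask_time_min_masks
# acts as a lower bound on the span count.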
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
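# A minimal sketch (not part of the original config file) of what the property
# above computes: the feature extractor's overall downsampling ratio is the
# product of the per-layer convolution strides.
_default_conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default `conv_stride` of this config
assert functools.reduce(operator.mul, _default_conv_stride, 1) == 320  # one frame per 320 samples (~20 ms at 16 kHz)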
| 307
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(a )
lowerCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(a )
lowerCAmelCase__ : Optional[Any] = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase__ : Any = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase__ : str = model.generate(**a )
lowerCAmelCase__ : Tuple = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(a )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase__ : List[str] = model_reloaded.generate(**a )
self.assertTrue(torch.allclose(a , a ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(a )
lowerCAmelCase__ : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(a ):
model.save_pretrained(a )
lowerCAmelCase__ : str = model.reverse_bettertransformer()
model.save_pretrained(a )
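# Hedged usage sketch (assumes the `optimum` package is installed), matching
# the round trip the tests above verify: convert to BetterTransformer, revert,
# then serialize safely.
# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()        # swap in fused attention modules
# model = model.reverse_bettertransformer()   # restore vanilla modules
# model.save_pretrained(tmpdir)               # saving is only valid after reverting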
| 307
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
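# Note on the projected path above: hidden states of width d_proj are first
# mapped back to d_embed via `proj`, then to vocabulary logits through the
# embedding-sized weight, i.e. two small matmuls instead of one
# (d_proj x n_token) matmul.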
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
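# Hypothetical sizes (illustration only) of the head/tail split implemented above:
_cutoffs, _n_token = [2000, 10000], 50000
_full = _cutoffs + [_n_token]        # [2000, 10000, 50000]
_shortlist = _full[0]                # 2000 frequent tokens live in the head
_n_clusters = len(_full) - 1         # 2 tail clusters
assert _shortlist + _n_clusters == 2002  # the head computes 2002 logits per step, not 50000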
| 307
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 ) -> Dict:
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase__ : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ : str = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ : List[str] = 8
else:
lowerCAmelCase__ : Any = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding='longest' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase__ : int = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
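# Hedged note on `collate_fn` above: tensor-core kernels favour sequence
# lengths that are multiples of 8 (fp16/bf16) or 16 (fp8), so the pad multiple
# follows the active mixed-precision mode, while TPUs get a fixed length of
# 128 to avoid recompilation on every new shape.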
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase__ = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE_ ) == "1":
lowerCAmelCase__ : List[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, it should be passed in here, e.g.:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCAmelCase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCAmelCase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Optional[int] = config['lr']
lowerCAmelCase__ : List[Any] = int(config['num_epochs'] )
lowerCAmelCase__ : List[str] = int(config['seed'] )
lowerCAmelCase__ : List[Any] = int(config['batch_size'] )
set_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ : Tuple = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build it here so that the seed also controls the new weights' initialization)
lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : int = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
lowerCAmelCase__ : Dict = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCAmelCase__ : int = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('.' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCAmelCase__ : int = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ : Dict = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCAmelCase__ : int = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'epoch': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
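# New Code #
# Hedged recap of the tracking API used in this function ("my_project" and the
# logged values below are illustrative):
# accelerator = Accelerator(log_with="all", project_dir="logs")
# accelerator.init_trackers("my_project", config={"lr": 2e-5})
# accelerator.log({"train_loss": 0.5, "epoch": 0}, step=0)
# accelerator.end_training()  # flush and close every tracker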
def lowerCAmelCase__ ( ) -> List[str]:
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=SCREAMING_SNAKE_CASE_ , default='logs' , help='Where to store experiment tracking logs and relevant project information' , )
lowerCAmelCase__ : Dict = parser.parse_args()
lowerCAmelCase__ : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 307
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
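# Hedged sketch of the threading pattern above (`slack_sdk` API): the first
# chat_postMessage returns a timestamp, and follow-ups posted with that value
# as `thread_ts` appear as replies under the summary message
# (`detail_block` below is a placeholder name):
# response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
# client.chat_postMessage(channel="#accelerate-ci-daily", thread_ts=response.data["ts"], blocks=[detail_block])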
| 307
| 1
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = IFPipeline
lowercase = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self._get_dummy_components()
def _lowerCamelCase ( self : Tuple , a : Any , a : List[Any]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self._test_save_load_local()
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
lowerCAmelCase__ : List[Any] = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=a , tokenizer=a )
# precompute text embeddings and drop the T5 encoder to save memory
pipe_a.text_encoder.to('cuda' )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a , a , a , a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCAmelCase__ : Dict = IFImgaImgPipeline(**pipe_a.components )
lowerCAmelCase__ : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a , a , a , a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCAmelCase__ : Optional[int] = IFInpaintingPipeline(**pipe_a.components )
lowerCAmelCase__ : Tuple = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a , a , a , a )
def _lowerCamelCase ( self : str , a : Any , a : Optional[Any] , a : Any , a : List[str] ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : str = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[str] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : int , a : Optional[Any] , a : str ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : str = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : str = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Optional[Any] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Any , a : List[Any] , a : int ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a )
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a )
lowerCAmelCase__ : List[str] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def lowerCAmelCase__ ( ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
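# Hedged recap of the measurement pattern used by the tests above: the helper
# resets the CUDA counters before each pipeline call, after which
# torch.cuda.max_memory_allocated() reads the high-water mark that the
# per-stage `mem_bytes < N * 10**9` assertions budget against.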
| 307
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert that the negative image has at least one truthy pixel
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# The PIL repr ends with a memory address, so assert only on the stable prefix
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# use .all(): the truth value of a multi-element array is ambiguous
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert via .all() that every pixel of the grayscale read is truthy
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert that the canny output has at least one truthy pixel
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test that get_neighbors_pixel() returns a non-None value
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
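# Worked illustration (hypothetical pixel; the actual neighbour ordering is
# defined by lbp.local_binary_value): eight neighbours thresholded against the
# centre are read off as a byte, e.g. bits [1, 0, 1, 1, 0, 0, 1, 0] -> 0b10110010 = 178.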
| 307
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : str = DPTConfig()
if "large" in checkpoint_url:
lowerCAmelCase__ : str = 1_024
lowerCAmelCase__ : str = 4_096
lowerCAmelCase__ : Optional[Any] = 24
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Dict = [5, 11, 17, 23]
lowerCAmelCase__ : Optional[int] = [256, 512, 1_024, 1_024]
lowerCAmelCase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : str = 150
lowerCAmelCase__ : str = 'huggingface/label-files'
lowerCAmelCase__ : List[str] = 'ade20k-id2label.json'
lowerCAmelCase__ : Any = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) ) , 'r' ) )
lowerCAmelCase__ : Optional[int] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = idalabel
lowerCAmelCase__ : Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ : Any = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase__ : str = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
lowerCAmelCase__ : Optional[int] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
lowerCAmelCase__ : Any = name.replace('patch_embed' , 'patch_embeddings' )
if "pos_embed" in name:
lowerCAmelCase__ : Optional[int] = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
lowerCAmelCase__ : str = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
lowerCAmelCase__ : Optional[int] = name.replace('proj' , 'projection' )
if "blocks" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
lowerCAmelCase__ : Dict = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name:
lowerCAmelCase__ : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase__ : Any = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
lowerCAmelCase__ : Tuple = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
lowerCAmelCase__ : Dict = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
lowerCAmelCase__ : List[str] = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
lowerCAmelCase__ : Optional[int] = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
lowerCAmelCase__ : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase__ : List[str] = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
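# e.g. "refinenet4" -> layer_idx = 4 -> abs(4 - 4) = 0 -> "fusion_stage.layers.0"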
if "out_conv" in name:
lowerCAmelCase__ : Optional[int] = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
lowerCAmelCase__ : str = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
lowerCAmelCase__ : int = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase__ : Tuple = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase__ : str = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase__ : List[Any] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase__ : Dict = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase__ : List[str] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase__ : List[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase__ : List[Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase__ : Tuple = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase__ : List[str] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
lowerCAmelCase__ : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
lowerCAmelCase__ : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : int = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCAmelCase__ : Tuple = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Any = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
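# Hedged illustration of the slicing above: timm fuses Q, K and V into one
# (3 * hidden, hidden) matrix, so row thirds recover the separate projections.
_h = 4  # toy hidden size for illustration only
_qkv = torch.arange(3 * _h * _h, dtype=torch.float32).view(3 * _h, _h)
_q, _k, _v = _qkv[:_h], _qkv[_h : 2 * _h], _qkv[-_h:]
assert _q.shape == _k.shape == _v.shape == (_h, _h)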
def lowerCAmelCase__ ( ) -> Optional[Any]:
lowerCAmelCase__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_dpt_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict from URL
lowerCAmelCase__ : Union[str, Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase__ : str = state_dict.pop(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = val
# read in qkv matrices
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
lowerCAmelCase__ : Any = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
# Check outputs on an image
lowerCAmelCase__ : str = 480 if 'ade' in checkpoint_url else 384
lowerCAmelCase__ : int = DPTImageProcessor(size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
# forward pass
lowerCAmelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ).logits if 'ade' in checkpoint_url else model(**SCREAMING_SNAKE_CASE_ ).predicted_depth
# Assert logits
lowerCAmelCase__ : Optional[Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
lowerCAmelCase__ : str = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(SCREAMING_SNAKE_CASE_ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , SCREAMING_SNAKE_CASE_ )
)
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model to hub...' )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
lowerCamelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
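# Hedged note on the pattern above: at import time only `_import_structure`
# (a mapping of submodule to exported names) is registered, and `_LazyModule`
# defers the heavy torch/TF imports until an attribute such as `BlipModel`
# is first accessed.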
| 307
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]:
if rng is None:
lowerCAmelCase__ : int = global_rng
lowerCAmelCase__ : Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase ):
def __init__( self : Dict , a : Optional[Any] , a : Union[str, Any]=7 , a : Union[str, Any]=400 , a : Dict=2_000 , a : int=10 , a : Optional[Any]=160 , a : Any=8 , a : Optional[Any]=0.0 , a : Tuple=4_000 , a : str=False , a : Optional[Any]=True , ):
'''simple docstring'''
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[Any] = min_seq_length
lowerCAmelCase__ : List[Any] = max_seq_length
lowerCAmelCase__ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ : Dict = padding_value
lowerCAmelCase__ : Optional[int] = sampling_rate
lowerCAmelCase__ : Tuple = return_attention_mask
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : Optional[Any] = feature_size
lowerCAmelCase__ : List[Any] = chunk_length
lowerCAmelCase__ : Optional[Any] = hop_length
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self : List[str] , a : str=False , a : List[Any]=False ):
'''simple docstring'''
def _flatten(a : Tuple ):
return list(itertools.chain(*a ) )
if equal_length:
lowerCAmelCase__ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ : List[str] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ : List[str] = [np.asarray(a ) for x in speech_inputs]
return speech_inputs
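# Hedged note on the helper above: with equal_length=False the inputs grow
# from min_seq_length to max_seq_length in steps of seq_length_diff, which
# exercises batch padding; `numpify` turns the nested lists into np.ndarrays.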
@require_torch
@require_torchaudio
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Tuple = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class.from_pretrained(a )
lowerCAmelCase__ : str = feat_extract_first.to_dict()
lowerCAmelCase__ : List[Any] = feat_extract_second.to_dict()
lowerCAmelCase__ : int = feat_extract_first.mel_filters
lowerCAmelCase__ : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Tuple = os.path.join(a , 'feat_extract.json' )
feat_extract_first.to_json_file(a )
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class.from_json_file(a )
lowerCAmelCase__ : Any = feat_extract_first.to_dict()
lowerCAmelCase__ : Tuple = feat_extract_second.to_dict()
lowerCAmelCase__ : Optional[Any] = feat_extract_first.mel_filters
lowerCAmelCase__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : int = [np.asarray(a ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ : int = feature_extractor(a , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase__ : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCAmelCase__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test batched
lowerCAmelCase__ : Any = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Dict = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase__ : List[Any] = np.asarray(a )
lowerCAmelCase__ : Dict = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Any = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test truncation required
lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCAmelCase__ : int = [np.asarray(a ) for speech_input in speech_inputs]
lowerCAmelCase__ : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCAmelCase__ : str = [np.asarray(a ) for speech_input in speech_inputs_truncated]
lowerCAmelCase__ : str = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Tuple = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples(self , num_samples ):
        '''simple docstring'''
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        # fmt: off
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
lowerCAmelCase__ : int = self._load_datasamples(1 )
lowerCAmelCase__ : str = WhisperFeatureExtractor()
lowerCAmelCase__ : Optional[Any] = feature_extractor(a , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , a , atol=1E-4 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : Union[str, Any] = self._load_datasamples(1 )[0]
lowerCAmelCase__ : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowerCAmelCase__ : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=a )[0]
self.assertTrue(np.all(np.mean(a ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(a ) - 1 ) < 1E-3 ) )
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
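# Intuition (not asserted anywhere in this file): for the "cosine" transform,
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 decays smoothly from ~1 at
# t=0 to 0 at t=1, so the betas produced above increase monotonically and are capped
# at max_beta; e.g. betas_for_alpha_bar(4) yields a 4-element increasing tensor.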
class HeunDiscreteScheduler(SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self , num_train_timesteps: int = 1_000 , beta_start: float = 0.0_0_0_8_5 , beta_end: float = 0.0_1_2 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='exp' )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self , timestep , schedule_timesteps=None ):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self , sample: torch.FloatTensor , timestep: Union[float, torch.FloatTensor] , ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
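    # Note on scale_model_input above: dividing by (sigma**2 + 1) ** 0.5 is the
    # k-diffusion "c_in" preconditioning -- for unit-variance data,
    # Var(x0 + sigma * eps) = 1 + sigma**2, so the division returns the noisy sample
    # to the unit-variance scale the model was trained on.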
    def set_timesteps(self , num_inference_steps: int , device: Union[str, torch.device] = None , num_train_timesteps: Optional[int] = None , ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('mps' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
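    # Note on set_timesteps above: sigmas and timesteps are interleaved via
    # repeat_interleave(2) because Heun's method is a two-stage solver -- every
    # denoising step consumes two entries, one for the first-order Euler prediction
    # and one for the second-order correction pass.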
    def _sigma_to_t(self , sigma , log_sigmas ):
        '''simple docstring'''
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
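    # _sigma_to_t above inverts the monotonic sigma schedule: it locates the two
    # surrounding log-sigmas and linearly interpolates between their indices, mapping
    # a continuous sigma back to a fractional timestep.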
    def _convert_to_karras(self , in_sigmas: torch.FloatTensor , num_inference_steps ):
        '''simple docstring'''
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
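    # Worked sketch of the Karras et al. (2022) schedule above (numbers illustrative):
    # with sigma_min=0.1, sigma_max=10.0 and rho=7.0, the endpoints become
    # 10.0 ** (1 / 7) ~= 1.39 and 0.1 ** (1 / 7) ~= 0.72; `ramp` interpolates linearly
    # between them, and raising back to the 7th power spaces the resulting sigmas
    # densely near sigma_min, where fine detail is resolved.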
@property
    def state_in_first_order(self ):
'''simple docstring'''
return self.dt is None
    def step(self , model_output: Union[torch.FloatTensor, np.ndarray] , timestep: Union[float, torch.FloatTensor] , sample: Union[torch.FloatTensor, np.ndarray] , return_dict: bool = True , ):
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
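    # Summary of the update in step above (standard Heun / improved Euler, gamma=0):
    #   stage 1: d1 = (x - x0_pred) / sigma_hat;  x_mid = x + d1 * (sigma_next - sigma_hat)
    #   stage 2: d2 = (x_mid - x0_pred') / sigma_next;  x_next = x + 0.5 * (d1 + d2) * (sigma_next - sigma_hat)
    # The first call caches d1, dt and x; the second call averages the two derivatives.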
    def add_noise(self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.FloatTensor , ):
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
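    # add_noise above implements the variance-exploding forward process used by
    # k-diffusion samplers: noisy = x0 + sigma_t * noise, with sigma_t looked up per
    # timestep and broadcast to the sample shape.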
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']' )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
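    # The tuple returned above follows the standard Pipeline contract: the first dict
    # feeds `preprocess` (prefix and truncation handling), the second feeds `_forward`
    # (i.e. `model.generate` kwargs), and the third feeds `postprocess` (return type
    # and decoding options).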
    def _parse_and_tokenize(self , *args , **kwargs ):
        '''simple docstring'''
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__(self , text_inputs , **kwargs ):
        '''simple docstring'''
        return super().__call__(text_inputs , **kwargs )
    def preprocess(self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
        '''simple docstring'''
        inputs = self.tokenizer(
            prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
                inputs["input_ids"] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs['attention_mask'][:, -keep_length:]
        return inputs
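    # Sketch of the "hole" strategy above (numbers illustrative): with a model limit of
    # 1024 tokens, a 1000-token prompt and max_new_tokens=100, keep_length is
    # 1024 - 100 = 924, so only the last 924 prompt tokens (and the matching slice of
    # the attention mask) are kept before generation.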
    def _forward(self , model_inputs , **generate_kwargs ):
        '''simple docstring'''
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
        return records
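# Minimal usage sketch for this pipeline (commented out; the checkpoint name is
# illustrative):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])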
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
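# For example, to_atuple(224) -> (224, 224), while to_atuple((224, 224)) is returned
# as-is; this mirrors how vision configs accept either a single size or an explicit pair.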
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self , a : np.ndarray , b : np.ndarray , tol : float ):
        '''simple docstring'''
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model(self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config , name='vision_model' )
        text_model = TFRobertaModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model(self , vision_config , text_config ):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
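        # The slice comparison above is a KV-cache consistency check: decoding the full
        # sequence in one pass must match decoding it incrementally with
        # `past_key_values`, up to the 1e-3 tolerance.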
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
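    # Background for the scaling test above: "linear" RoPE scaling divides positions by
    # the factor and therefore perturbs short-input outputs too, while "dynamic" NTK
    # scaling only activates beyond the original maximum sequence length, so short-input
    # outputs are expected to match in the dynamic case.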
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
        lowerCAmelCase__ : int = torch.tensor(
            [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out , labels ) -> int:
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ) -> list:
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path , encoding='utf_8' ) as f:
        reader = csv.reader(f )
        output = []
        next(reader )  # skip the header line
        for line in tqdm(reader ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ) -> list:
    """Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label) into
    Transformer inputs of shape (n_batch, 2, input_len), one row per candidate continuation."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
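# Illustrative note (added; mirrors the function above): for a toy batch of one story
# with two 3-token continuations and input_len=16, the produced tensors have shapes
# input_ids/lm_labels (1, 2, 16), mc_token_ids (1, 2), mc_labels (1,), where the
# second axis indexes the two candidate continuations.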
def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=str , default='' )
    parser.add_argument('--eval_dataset' , type=str , default='' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--num_train_epochs' , type=int , default=3 )
    parser.add_argument('--train_batch_size' , type=int , default=8 )
    parser.add_argument('--eval_batch_size' , type=int , default=16 )
    parser.add_argument('--adam_epsilon' , default=1e-8 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=int , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=int , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=float , default=6.25e-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=str , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--lm_coef' , type=float , default=0.9 )
    parser.add_argument('--n_valid' , type=int , default=374 )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        """Tokenize and encode a nested object."""
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps ) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
        model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info('  %s = %s' , key , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
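# Example invocation (illustrative; flags taken from the parser above, script and data
# paths assumed):
#   python run_openai_gpt.py --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset data/cloze_test_val__spring2016.csv \
#       --eval_dataset data/cloze_test_test__spring2016.csv \
#       --output_dir out/ --train_batch_size 8 --num_train_epochs 3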
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
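# Hedged usage sketch (added; only the public API exercised above is assumed):
#   from transformers import GenerationConfig
#   cfg = GenerationConfig(do_sample=True, temperature=0.7)
#   cfg.save_pretrained("/tmp/gen-cfg")           # writes generation_config.json by default
#   restored = GenerationConfig.from_pretrained("/tmp/gen-cfg")
#   assert restored.temperature == 0.7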
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
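# Hedged usage sketch (added): `require_version` takes a pip-style specifier and raises
# if the installed version does not satisfy it, e.g.
#   require_version("tokenizers>=0.11.1", "pip install tokenizers -U")
# where the second argument is an optional hint appended to the error message.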
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
def __init__( self : Optional[int] , a : Dict , a : Tuple=13 , a : Union[str, Any]=30 , a : int=2 , a : Dict=3 , a : Optional[int]=True , a : Dict=True , a : Union[str, Any]=32 , a : List[Any]=5 , a : str=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Optional[int]=0.1 , a : int=0.1 , a : List[str]=10 , a : str=0.0_2 , a : Union[str, Any]=None , a : Optional[int]=2 , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Any = scope
lowerCAmelCase__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Any = (image_size // patch_size) ** 2
lowerCAmelCase__ : Optional[int] = num_patches + 1
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[Any] , a : int , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ViTModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Optional[int] , a : Any , a : Dict , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : List[str] , a : Tuple , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase__ : Union[str, Any] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : List[str] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Dict = config_and_inputs
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
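# Worked example (added): with the tester defaults above, image_size=30 and patch_size=2
# give (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226 once the [CLS] token
# is prepended.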
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = ViTModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class(a )
lowerCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[Any] = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[str] = prepare_img()
lowerCAmelCase__ : Dict = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**a )
# verify the logits
lowerCAmelCase__ : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowerCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : List[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : Optional[int] = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(a , interpolate_pos_encoding=a )
# verify the logits
lowerCAmelCase__ : str = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowerCAmelCase__ : int = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        lowerCAmelCase__ : int = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
lowerCAmelCase__ : str = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Optional[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : str = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(a )
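# Worked example (added): in the 480px test above, the patch-8 checkpoint produces
# (480 // 8) ** 2 = 3600 patch tokens plus one [CLS] token, matching the asserted
# hidden-state shape (1, 3601, 384); interpolate_pos_encoding=True resizes the position
# embeddings to that longer grid.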
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
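# Hedged note (added; formula assumed from Karras et al. 2022, not from this file):
# `use_karras_sigmas=True` swaps the default sigma spacing for
#   sigma_i = (sigma_max**(1/rho) + i/(n-1) * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
# with rho = 7, which concentrates steps where they reduce error the most.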
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
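# Hedged note (added): with this pattern, `import transformers.models.blip` stays cheap;
# heavy submodules such as `modeling_blip` are only imported when an attribute listed in
# `_import_structure` (e.g. `BlipForConditionalGeneration`) is first accessed.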
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars() -> str:
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
return ch
def get_character() -> str:
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
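# Hedged usage sketch (added; interactive-terminal behaviour assumed):
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break  # Ctrl-C
#       print(repr(key))
# Arrow keys come back as chr(KEYMAP["up"]) etc., i.e. with ARROW_KEY_FLAG set.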
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__ ( unittest.TestCase ):
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.dummy_uncond_unet
lowerCAmelCase__ : List[Any] = DDIMScheduler()
lowerCAmelCase__ : Optional[int] = self.dummy_vq_model
lowerCAmelCase__ : str = LDMPipeline(unet=a , vqvae=a , scheduler=a )
ldm.to(a )
ldm.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
lowerCAmelCase__ : int = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : str = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Any = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
lowerCAmelCase__ : Union[str, Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(a )
ldm.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images
lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase__ : Optional[int] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
lowerCAmelCase__ : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
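# Hedged usage sketch (added; mirrors the slow test above, repo id taken from this file):
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=5, output_type="numpy").images[0]  # (256, 256, 3)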
def base16_encode(data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
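# Worked example (added): base16_encode(b"Hello") == "48656C6C6F", and
# base16_decode("48656C6C6F") == b"Hello"; each byte maps to two uppercase hex digits,
# which is why the decoder rejects odd-length or lowercase input.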
import doctest
from collections import deque
import numpy as np
class A__ :
    def __init__(self ) -> None:
        """Two example signals to convolve circularly."""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self ) -> list[float]:
        """Performs circular convolution of first_signal and second_signal via the matrix method."""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
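# Hedged cross-check (added): circular convolution is multiplication in the frequency
# domain, so the matrix-method result can be verified with FFTs:
#   a, b = np.array([2, 1, 2, -1]), np.array([1, 2, 3, 4])
#   np.round(np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))), 2)
#   # -> array([10., 10.,  6., 14.])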
from __future__ import annotations
def generate_all_combinations(n: int , k: int ) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state(increment: int , total_number: int , level: int , current_list: list[int] , total_list: list[list[int]] , ) -> None:
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state(total_list: list[list[int]] ) -> None:
    for i in total_list:
        print(*i )
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n , k )
    print_all_state(total_list )
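# Hedged cross-check (added): the recursive generator matches the standard library,
# including ordering:
#   from itertools import combinations
#   assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]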
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class A__ :
lowercase = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
lowercase = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
lowercase = list_field(
default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Benchmark training of model'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Verbose memory tracing'} )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
lowercase = field(
default=__magic_name__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
lowercase = field(default=__magic_name__ , metadata={'help': 'Trace memory line by line'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Save result to a CSV file'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Save all print statements in a log file'} )
lowercase = field(default=__magic_name__ , metadata={'help': 'Whether to print environment information'} )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
lowercase = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
lowercase = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
lowercase = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
lowercase = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
lowercase = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving environment information.'} , )
lowercase = field(
default=F"""log_{round(time() )}.csv""" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
lowercase = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
lowercase = field(
default=__magic_name__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
' are deprecated in general and it is advised to use external Benchmarking libraries '
            ' to benchmark Transformer models.' , FutureWarning , )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
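# --- Editor's note (sketch): why `list_field` above wraps a default_factory.
# Mutable dataclass defaults must be built per-instance; a bare `= [8]` would
# be shared across instances. Names below are illustrative only.
from dataclasses import dataclass, field

@dataclass
class _Demo:
    batch_sizes: list = field(default_factory=lambda: [8])

assert _Demo().batch_sizes == [8]
assert _Demo().batch_sizes is not _Demo().batch_sizes  # fresh list each time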
| 307
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
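# --- Editor's note (sketch): the update() contract exercised above, in one
# place -- known keys are applied in place, unknown keys come back untouched.
from transformers import GenerationConfig

_cfg = GenerationConfig()
_unused = _cfg.update(max_new_tokens=1_024, foo="bar")
assert _cfg.max_new_tokens == 1_024 and _unused == {"foo": "bar"}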
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowerCAmelCase__ : Union[str, Any] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
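# --- Editor's note (sketch): stooge sort recurses on three overlapping 2/3
# slices, giving the recurrence T(n) = 3*T(2n/3) + O(1). Its exponent is
# log(3) / log(3/2) ~= 2.71, so it is a curiosity rather than a practical sort.
import math

print(round(math.log(3) / math.log(3 / 2), 2))  # 2.71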
| 307
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class A__ :
lowercase = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
if num < 0:
return False
lowerCAmelCase__ : int = num
lowerCAmelCase__ : int = 0
while num > 0:
lowerCAmelCase__ : Any = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
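# --- Editor's note (sketch): standalone restatement of the digit-reversal
# loop above, for illustration. For 121 the loop builds 1 -> 12 -> 121, which
# equals the original, so 121 is a palindrome; 123 reverses to 321.
def _reverse_digits(n: int) -> int:
    rev = 0
    while n > 0:
        rev = rev * 10 + n % 10
        n //= 10
    return rev

assert _reverse_digits(121) == 121
assert _reverse_digits(123) == 321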
| 307
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowerCAmelCase__ : Union[str, Any] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 307
| 1
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(SCREAMING_SNAKE_CASE_ ), magnitude * sin(SCREAMING_SNAKE_CASE_ )]
return [magnitude * cos(radians(SCREAMING_SNAKE_CASE_ ) ), magnitude * sin(radians(SCREAMING_SNAKE_CASE_ ) )]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10**-1 ) -> bool:
lowerCAmelCase__ : NDArray[floataa] = cross(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : float = sum(SCREAMING_SNAKE_CASE_ )
return abs(SCREAMING_SNAKE_CASE_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ = array([[0, -2000], [0, -1200], [0, 15_600], [0, -12_400]])
lowerCamelCase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
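# --- Editor's note (sketch): worked check of polar_force's decomposition. A
# magnitude of 10 at 45 degrees splits into equal x/y components of
# 10 / sqrt(2) ~= 7.071.
from math import cos, radians, sin

_fx, _fy = 10 * cos(radians(45)), 10 * sin(radians(45))
print(round(_fx, 3), round(_fy, 3))  # 7.071 7.071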
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
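# --- Editor's note (sketch): the lazy-module pattern used above, reduced to
# its core -- attribute access triggers the real import. Illustrative only;
# the real _LazyModule also handles __dir__, pickling, and TYPE_CHECKING.
import importlib
import types

class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)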
| 307
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> None:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
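# --- Editor's note (sketch): what require_version enforces, shown directly
# with `packaging`. The values below are illustrative, not the pinned table.
from packaging import version

_installed, _minimum = "4.65.0", "4.27"
assert version.parse(_installed) >= version.parse(_minimum)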
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
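# --- Editor's note (sketch): the framework-agnostic contract tested above,
# checked once on plain NumPy inputs.
import numpy as _np

_x = _np.random.randn(1, 3, 4)
assert _np.squeeze(_x).shape == (3, 4)
assert _np.reshape(_x, (4, 3)).shape == (4, 3)
assert _np.transpose(_x, (1, 2, 0)).shape == (3, 4, 1)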
| 307
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
lowerCamelCase__ = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ElectraTokenizer
def __init__( self : int , a : str=None , a : int=None , a : List[Any]=True , a : Union[str, Any]="[UNK]" , a : Dict="[SEP]" , a : Union[str, Any]="[PAD]" , a : str="[CLS]" , a : Optional[int]="[MASK]" , a : Union[str, Any]=True , a : Any=None , **a : List[str] , ):
'''simple docstring'''
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowerCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowerCAmelCase__ : str = getattr(a , normalizer_state.pop('type' ) )
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : List[str] = strip_accents
lowerCAmelCase__ : Dict = tokenize_chinese_chars
lowerCAmelCase__ : List[Any] = normalizer_class(**a )
lowerCAmelCase__ : List[Any] = do_lower_case
def _lowerCamelCase ( self : Any , a : Tuple , a : Tuple=None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : int , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self._tokenizer.model.save(a , name=a )
return tuple(a )
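# --- Editor's note (sketch): the token-type-id layout built above for a
# sequence pair -- segment A (with [CLS] and its [SEP]) maps to 0, segment B
# (with its [SEP]) maps to 1. Lengths below are illustrative.
_len_a, _len_b = 2, 2
_type_ids = [0] * (1 + _len_a + 1) + [1] * (_len_b + 1)
print(_type_ids)  # [0, 0, 0, 0, 1, 1, 1]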
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase__ = concatenate_datasets
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadManager
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 1
|
import random
from typing import Any
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list[Any]:
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : Optional[int] = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
lowerCAmelCase__ : List[Any] = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = data[b], data[a]
return data
if __name__ == "__main__":
lowerCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 307
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 307
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase__ : Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Optional[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = ['key_proj', 'value_proj', 'query_proj']
lowerCAmelCase__ : Union[str, Any] = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
lowerCAmelCase__ : Optional[Any] = key.split('.' )
if attributes[0] == "lm_head":
lowerCAmelCase__ : Dict = prophet
lowerCAmelCase__ : Tuple = prophet_old
else:
lowerCAmelCase__ : Optional[Any] = prophet.prophetnet
lowerCAmelCase__ : Optional[int] = prophet_old.model
lowerCAmelCase__ : Optional[int] = False
for attribute in attributes:
if attribute in mapping:
lowerCAmelCase__ : List[Any] = mapping[attribute]
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCAmelCase__ : Union[str, Any] = attribute
elif hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[str] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase__ : Optional[Any] = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
lowerCAmelCase__ : Tuple = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase__ : Optional[Any] = old_model.bias
logger.info(F'''{attribute} is initialized''' )
lowerCAmelCase__ : str = True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE_ , 'in_proj_weight' ):
lowerCAmelCase__ : Dict = old_model.in_proj_weight.shape[0] // 3
lowerCAmelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCAmelCase__ : str = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCAmelCase__ : List[str] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCAmelCase__ : int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCAmelCase__ : List[str] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCAmelCase__ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCAmelCase__ : Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCAmelCase__ : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCAmelCase__ : str = True
break
if attribute.isdigit():
lowerCAmelCase__ : Any = model[int(SCREAMING_SNAKE_CASE_ )]
lowerCAmelCase__ : Optional[int] = old_model[int(SCREAMING_SNAKE_CASE_ )]
else:
lowerCAmelCase__ : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if old_attribute == "":
lowerCAmelCase__ : str = old_model
else:
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
lowerCAmelCase__ : Tuple = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
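# --- Editor's note (sketch): the in_proj split performed above, in isolation.
# A fused (3*d, d) projection is cut into query/key/value blocks of d rows
# each; dimensions below are illustrative.
import torch as _torch

_d = 4
_in_proj = _torch.randn(3 * _d, _d)
_q, _k, _v = _in_proj[:_d, :], _in_proj[_d : 2 * _d, :], _in_proj[2 * _d :, :]
assert _q.shape == _k.shape == _v.shape == (_d, _d)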
| 307
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
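# A minimal sketch of the dummy-object pattern used above. The real
# `DummyObject` metaclass and `requires_backends` helper live in the library's
# utils module; the simplified versions here are illustrative, not the exact API.
import importlib.util

def requires_backends_sketch(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")

class DummyObjectSketch(type):
    # Attribute access on the class itself (e.g. `SomePipeline.from_pretrained`)
    # also triggers the check, so users get an actionable error instead of an
    # AttributeError when an optional backend is not installed.
    def __getattr__(cls, key):
        requires_backends_sketch(cls, cls._backends)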
| 307
| 1
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
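# A minimal sketch of the threaded follow-up pattern used above: the first
# chat_postMessage returns a timestamp, and replies that pass it as `thread_ts`
# attach to that message. The token and channel here are placeholders.
from slack_sdk import WebClient
sketch_client = WebClient(token="xoxb-placeholder")
parent = sketch_client.chat_postMessage(channel="#accelerate-ci-daily", text="3 failed tests")
sketch_client.chat_postMessage(
    channel="#accelerate-ci-daily",
    thread_ts=parent.data["ts"],
    text="Details for tests/test_foo.py",
)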
| 307
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
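# A minimal sketch of the two RoPE scaling variants exercised by the
# `parameterized` test above. This is a simplification of the library's rotary
# embedding classes, not their exact implementation; `max_pos` stands in for
# the model's trained maximum position embeddings.
import torch

def rope_angles(seq_len, dim, base=10000.0, scaling_type="linear", factor=10.0, max_pos=2048):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    t = torch.arange(seq_len).float()
    if scaling_type == "linear":
        t = t / factor  # compress positions so longer sequences reuse trained angles
    elif scaling_type == "dynamic" and seq_len > max_pos:
        # dynamic NTK rescales the base only once the input exceeds the trained
        # length, which is why short inputs match the unscaled model in the test
        base = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(t, inv_freq)  # (seq_len, dim // 2) rotation angles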
| 307
| 1
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
def __init__( self : int , *a : Any , **a : Tuple ):
'''simple docstring'''
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 307
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
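# Worked example for the property above: with the default strides
# (5, 2, 2, 2, 2, 2, 2), the product is 5 * 2**6 = 320, i.e. one output frame
# of the convolutional feature encoder covers 320 input waveform samples.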
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
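# A minimal sketch of the lazy-import idea behind `_LazyModule`, using the
# module-level __getattr__ hook from PEP 562. This is a simplification for
# illustration, not the library's actual implementation.
import importlib

_import_structure_sketch = {"configuration_pegasus_x": ["PegasusXConfig"]}

def __getattr__(name):
    # Resolve the submodule on first attribute access instead of at import time.
    for module_name, exports in _import_structure_sketch.items():
        if name in exports:
            module = importlib.import_module("." + module_name, __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")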
| 307
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
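# A minimal numeric sketch of the two-level factorization implemented above:
# a tail word's log-probability is the head log-probability of its cluster
# token plus its log-probability inside that cluster's softmax. Sizes are
# hypothetical (6 shortlist words + 1 cluster of 100 words).
import torch

head_logprob = torch.log_softmax(torch.randn(1, 7), dim=-1)    # shortlist + cluster token
tail_logprob = torch.log_softmax(torch.randn(1, 100), dim=-1)  # words inside the cluster
word_logprob = head_logprob[:, 6, None] + tail_logprob         # log p(cluster) + log p(word | cluster)
full = torch.cat([head_logprob[:, :6], word_logprob], dim=-1)
assert torch.allclose(full.exp().sum(), torch.tensor(1.0), atol=1e-5)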
| 307
| 1
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCamelCase__ = None
lowerCamelCase__ = {
"""7B""": 1_1008,
"""13B""": 1_3824,
"""30B""": 1_7920,
"""65B""": 2_2016,
"""70B""": 2_8672,
}
lowerCamelCase__ = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=256 ) -> Any:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
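# Worked example (7B config, assuming dim = 4096 and the defaults above):
# int(8 * 4096 / 3) = 10922, and rounding up to the next multiple of 256 gives
# 43 * 256 = 11008, matching the "7B" entry in the size map defined above.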
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
with open(SCREAMING_SNAKE_CASE_ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , 'tmp' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = read_json(os.path.join(SCREAMING_SNAKE_CASE_ , 'params.json' ) )
lowerCAmelCase__ : Optional[int] = NUM_SHARDS[model_size]
lowerCAmelCase__ : List[Any] = params['n_layers']
lowerCAmelCase__ : int = params['n_heads']
lowerCAmelCase__ : str = n_heads // num_shards
lowerCAmelCase__ : Tuple = params['dim']
lowerCAmelCase__ : Optional[Any] = dim // n_heads
lowerCAmelCase__ : Dict = 10000.0
lowerCAmelCase__ : Tuple = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCAmelCase__ : Any = params['n_kv_heads'] # for GQA / MQA
lowerCAmelCase__ : Tuple = n_heads_per_shard // num_key_value_heads
lowerCAmelCase__ : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCAmelCase__ : Optional[int] = n_heads
lowerCAmelCase__ : List[Any] = n_heads_per_shard
lowerCAmelCase__ : int = dim
# permute for sliced rotary
def permute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=n_heads , SCREAMING_SNAKE_CASE_=dim , SCREAMING_SNAKE_CASE_=dim ):
return w.view(SCREAMING_SNAKE_CASE_ , dima // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
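# The permute above converts Meta's interleaved rotary layout (adjacent
# even/odd channels form a rotated pair) into the Hugging Face convention
# (the first half of each head is rotated against the second half), so the
# converted q/k weights produce identical attention outputs.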
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCAmelCase__ : Any = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
lowerCAmelCase__ : str = [
torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , F'''consolidated.{i:02d}.pth''' ) , map_location='cpu' )
for i in range(SCREAMING_SNAKE_CASE_ )
]
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Optional[Any] = {'weight_map': {}}
for layer_i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
lowerCAmelCase__ : Any = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCAmelCase__ : int = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
lowerCAmelCase__ : List[Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : Tuple = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Any = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowerCAmelCase__ : Tuple = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 )
lowerCAmelCase__ : Optional[Any] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowerCAmelCase__ : List[str] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 )
lowerCAmelCase__ : str = inv_freq
for k, v in state_dict.items():
lowerCAmelCase__ : List[Any] = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : Optional[Any] = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
lowerCAmelCase__ : Tuple = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
lowerCAmelCase__ : Any = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCAmelCase__ : Any = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Write configs
lowerCAmelCase__ : Optional[Any] = {'total_size': param_count * 2}
write_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , 'pytorch_model.bin.index.json' ) )
lowerCAmelCase__ : int = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
lowerCAmelCase__ : Union[str, Any] = params['multiple_of'] if 'multiple_of' in params else 256
lowerCAmelCase__ : Any = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE_ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=SCREAMING_SNAKE_CASE_ , )
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
lowerCAmelCase__ : Dict = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ , safe_serialization=SCREAMING_SNAKE_CASE_ )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
# Initialize the tokenizer based on the `spm` model
lowerCAmelCase__ : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
lowerCAmelCase__ : str = tokenizer_class(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> Optional[Any]:
lowerCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=SCREAMING_SNAKE_CASE_ , help='Whether or not to save using `safetensors`.' )
lowerCAmelCase__ : Dict = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowerCAmelCase__ : Any = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 307
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if n == 0:
return 0
lowerCAmelCase__ : Tuple = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase__ : int = max(
SCREAMING_SNAKE_CASE_ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE_ ) )
return max_revenue
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCAmelCase__ : Optional[int] = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase__ : Optional[int] = max(
SCREAMING_SNAKE_CASE_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , )
lowerCAmelCase__ : List[str] = max_revenue
return max_rev[n]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
lowerCAmelCase__ : int = [float('-inf' ) for _ in range(n + 1 )]
lowerCAmelCase__ : List[Any] = 0
for i in range(1 , n + 1 ):
lowerCAmelCase__ : List[str] = max_rev[i]
for j in range(1 , i + 1 ):
lowerCAmelCase__ : Tuple = max(SCREAMING_SNAKE_CASE_ , prices[j - 1] + max_rev[i - j] )
lowerCAmelCase__ : Union[str, Any] = max_revenue_i
return max_rev[n]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if n < 0:
lowerCAmelCase__ : Optional[int] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if n > len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : int = (
'Each integral piece of rod must have a corresponding price. '
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE_ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : Any = [6, 10, 12, 15, 20, 23]
lowerCAmelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCAmelCase__ : List[Any] = 36
lowerCAmelCase__ : Dict = top_down_cut_rod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
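# A minimal, self-contained sketch of the bottom-up dynamic programme above,
# using readable (hypothetical) names so the recurrence is easy to follow:
# max_rev[i] = max over j in 1..i of prices[j - 1] + max_rev[i - j].
def bottom_up_cut_rod_sketch(n, prices):
    max_rev = [0] * (n + 1)  # max_rev[0] = 0: a rod of length 0 earns nothing
    for i in range(1, n + 1):
        best = float("-inf")
        for j in range(1, i + 1):
            # sell a piece of length j, then optimally cut the remaining i - j
            best = max(best, prices[j - 1] + max_rev[i - j])
        max_rev[i] = best
    return max_rev[n]

assert bottom_up_cut_rod_sketch(6, [6, 10, 12, 15, 20, 23]) == 36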
| 307
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
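# A rough standalone sketch (not the project's own implementation) of how a
# local binary pattern value is typically computed: each of the 8 neighbours
# is compared with the centre pixel and contributes one bit to an 8-bit code.
import numpy as np

def lbp_value_sketch(image, x, y):
    center = image[x][y]
    # clockwise neighbour offsets, starting from the top-left pixel
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]:
            value |= int(image[nx][ny] >= center) << bit
    return value

assert lbp_value_sketch(np.array([[10, 20], [30, 5]]), 0, 0) > 0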
| 307
| 1
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase__ = get_logger(__name__)
class A__ :
def __init__( self : Tuple , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : int = (
os.path.join(a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCAmelCase__ : int = Extractor
def _lowerCamelCase ( self : str , a : str ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCAmelCase__ : List[Any] = os.path.abspath(a )
return os.path.join(self.extract_dir , hash_url_to_filename(a ) )
def _lowerCamelCase ( self : List[Any] , a : str , a : bool ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(a ) and not (os.path.isdir(a ) and os.listdir(a ))
)
def _lowerCamelCase ( self : Union[str, Any] , a : str , a : bool = False ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.extractor.infer_extractor_format(a )
if not extractor_format:
return input_path
lowerCAmelCase__ : Dict = self._get_output_path(a )
if self._do_extract(a , a ):
self.extractor.extract(a , a , a )
return output_path
class A__ ( __magic_name__ ):
@classmethod
@abstractmethod
def _lowerCamelCase ( cls : Optional[Any] , a : Union[Path, str] , **a : int ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
...
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = []
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : int ):
'''simple docstring'''
with open(a , 'rb' ) as f:
return f.read(a )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , a : Union[Path, str] , a : bytes = b"" ):
'''simple docstring'''
if not magic_number:
lowerCAmelCase__ : List[Any] = max(len(a ) for cls_magic_number in cls.magic_numbers )
try:
lowerCAmelCase__ : List[Any] = cls.read_magic_number(a , a )
except OSError:
return False
return any(magic_number.startswith(a ) for cls_magic_number in cls.magic_numbers )
class A__ ( __magic_name__ ):
@classmethod
def _lowerCamelCase ( cls : Dict , a : Union[Path, str] , **a : List[str] ):
'''simple docstring'''
return tarfile.is_tarfile(a )
@staticmethod
def _lowerCamelCase ( a : int , a : List[str] ):
'''simple docstring'''
def resolved(a : str ) -> str:
return os.path.realpath(os.path.abspath(a ) )
def badpath(a : str , a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(a , a ) ).startswith(a )
def badlink(a : Tuple , a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCAmelCase__ : List[Any] = resolved(os.path.join(a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=a )
lowerCAmelCase__ : List[Any] = resolved(a )
for finfo in members:
if badpath(finfo.name , a ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(a , a ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(a , a ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
os.makedirs(a , exist_ok=a )
lowerCAmelCase__ : Optional[int] = tarfile.open(a )
tar_file.extractall(a , members=TarExtractor.safemembers(a , a ) )
tar_file.close()
class A__ ( __magic_name__ ):
lowercase = [b'\x1F\x8B']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with gzip.open(a , 'rb' ) as gzip_file:
with open(a , 'wb' ) as extracted_file:
shutil.copyfileobj(a , a )
class A__ ( __magic_name__ ):
lowercase = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def _lowerCamelCase ( cls : List[Any] , a : Union[Path, str] , a : bytes = b"" ):
'''simple docstring'''
if super().is_extractable(a , magic_number=a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a , 'rb' ) as fp:
lowerCAmelCase__ : Any = _EndRecData(a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCAmelCase__ : Union[str, Any] = fp.read(a ) # CD is where we expect it to be
if len(a ) == sizeCentralDir:
lowerCAmelCase__ : List[Any] = struct.unpack(a , a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
os.makedirs(a , exist_ok=a )
with zipfile.ZipFile(a , 'r' ) as zip_file:
zip_file.extractall(a )
zip_file.close()
class A__ ( __magic_name__ ):
lowercase = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with lzma.open(a ) as compressed_file:
with open(a , 'wb' ) as extracted_file:
shutil.copyfileobj(a , a )
class A__ ( __magic_name__ ):
lowercase = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(a , exist_ok=a )
lowerCAmelCase__ : str = rarfile.RarFile(a )
rf.extractall(a )
rf.close()
class A__ ( __magic_name__ ):
lowercase = [b'\x28\xb5\x2F\xFD']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
lowerCAmelCase__ : Optional[Any] = zstd.ZstdDecompressor()
with open(a , 'rb' ) as ifh, open(a , 'wb' ) as ofh:
dctx.copy_stream(a , a )
class A__ ( __magic_name__ ):
lowercase = [b'\x42\x5A\x68']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
with bza.open(a , 'rb' ) as compressed_file:
with open(a , 'wb' ) as extracted_file:
shutil.copyfileobj(a , a )
class A__ ( __magic_name__ ):
lowercase = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(a , exist_ok=a )
with pyazr.SevenZipFile(a , 'r' ) as archive:
archive.extractall(a )
class A__ ( __magic_name__ ):
lowercase = [b'\x04\x22\x4D\x18']
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : Union[Path, str] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(a , 'rb' ) as compressed_file:
with open(a , 'wb' ) as extracted_file:
shutil.copyfileobj(a , a )
class A__ :
    # Check zip last: other archive formats (e.g. tar or gzip) can be wrongly detected as zip.
lowercase = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _lowerCamelCase ( cls : Dict ):
'''simple docstring'''
return max(
len(a )
for extractor in cls.extractors.values()
if issubclass(a , a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _lowerCamelCase ( a : Union[Path, str] , a : int ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(a , magic_number_length=a )
except OSError:
return b""
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , a : Union[Path, str] , a : bool = False ):
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=a , )
lowerCAmelCase__ : Dict = cls.infer_extractor_format(a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , a : Union[Path, str] ): # <Added version="2.4.0"/>
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = cls._get_magic_number_max_length()
lowerCAmelCase__ : Optional[Any] = cls._read_magic_number(a , a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(a , magic_number=a ):
return extractor_format
@classmethod
def _lowerCamelCase ( cls : Tuple , a : Union[Path, str] , a : Union[Path, str] , a : Optional[str] = None , a : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(a ) , exist_ok=a )
# Prevent parallel extractions
lowerCAmelCase__ : List[str] = str(Path(a ).with_suffix('.lock' ) )
with FileLock(a ):
shutil.rmtree(a , ignore_errors=a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a , a ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=a , )
lowerCAmelCase__ : Optional[int] = extractor if extractor != 'deprecated' else extractor_format
else:
lowerCAmelCase__ : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(a , a )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(a ):
return extractor.extract(a , a )
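# A minimal sketch of the magic-number dispatch implemented above: read the
# first few bytes of a file and match them against known signatures. The
# `MAGIC_NUMBERS_SKETCH` table below is illustrative, not the module's registry.
from typing import Optional

MAGIC_NUMBERS_SKETCH = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04"],
    "bz2": [b"\x42\x5a\x68"],
}

def infer_format_sketch(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        header = f.read(8)
    for fmt, signatures in MAGIC_NUMBERS_SKETCH.items():
        if any(header.startswith(sig) for sig in signatures):
            return fmt
    return None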
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
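# A minimal sketch of the lazy-import pattern behind `_LazyModule` above: the
# module records only *where* each symbol lives, and the real import happens
# on first attribute access. `LazyModuleSketch` is a simplified stand-in, not
# the transformers implementation.
import importlib

class LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # symbol -> submodule, e.g. {"BlipConfig": "configuration_blip"}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self._name!r} has no attribute {symbol!r}")
        module = importlib.import_module(f".{submodule}", self._name)
        return getattr(module, symbol)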
| 307
| 1
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> float:
lowerCAmelCase__ : Union[str, Any] = 0
while len(SCREAMING_SNAKE_CASE_ ) > 1:
lowerCAmelCase__ : Any = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
lowerCAmelCase__ : int = files.index(min(SCREAMING_SNAKE_CASE_ ) )
temp += files[min_index]
files.pop(SCREAMING_SNAKE_CASE_ )
files.append(SCREAMING_SNAKE_CASE_ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
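# Worked example of the greedy merge above (hypothetical file sizes): with
# files [2, 3, 4], merge 2 + 3 = 5 (cost 5), then 5 + 4 = 9 (cost 9), for a
# total cost of 14. A heap-based sketch gives the same answer in O(n log n)
# instead of repeated linear scans for the minimum:
import heapq

def optimal_merge_cost_heap_sketch(sizes):
    heap = list(sizes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        # always merge the two currently smallest files
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total

assert optimal_merge_cost_heap_sketch([2, 3, 4]) == 14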
| 307
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
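# A quick standalone sanity check (a sketch, not library code) of the cosine
# alpha-bar schedule computed above: betas are small at the start of the
# diffusion schedule and grow towards the end, capped at max_beta = 0.999.
import math

def cosine_alpha_bar_sketch(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

betas_sketch = [
    min(1 - cosine_alpha_bar_sketch((i + 1) / 10) / cosine_alpha_bar_sketch(i / 10), 0.999)
    for i in range(10)
]
assert betas_sketch[0] < betas_sketch[-1]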
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
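# A standalone sketch of the Karras et al. (2022) sigma schedule used by
# `_convert_to_karras` above: interpolate between sigma_max and sigma_min in
# rho-space with rho = 7.0, the value used in the paper. The numbers in the
# usage line are illustrative.
import numpy as np

def karras_sigmas_sketch(sigma_min, sigma_max, num_steps, rho=7.0):
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # sigmas run from sigma_max down to sigma_min as ramp goes 0 -> 1
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas_sketch = karras_sigmas_sketch(0.03, 14.6, 10)
assert sigmas_sketch[0] > sigmas_sketch[-1]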
| 307
| 1
|
import inspect
import unittest
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
lowerCAmelCase__ : Any = inspect.getmembers(a , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowerCAmelCase__ : Tuple = 'k-diffusion'
elif backend == "invisible_watermark":
lowerCAmelCase__ : Union[str, Any] = 'invisible-watermark'
assert backend in deps, f'''{backend} is not in the deps table!'''
| 307
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
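    # Shape note for the contrastive logits checked above: with N images and
    # M texts, logits_per_image is (N, M) and logits_per_text is its
    # transpose (M, N), so each row scores one image against every caption.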
| 307
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
lowerCamelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCamelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCamelCase__ = os.environ.get("""USER_TOKEN""", """""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict[Any, Any]:
lowerCAmelCase__ : Any = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
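# Example (with a hypothetical token) of the kind of fields the authenticated
# /user endpoint returns, using the `fetch_github_info` helper above:
#   info = fetch_github_info("ghp_...")
#   info["login"], info["name"], info["public_repos"]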
| 307
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, cont_a, cont_b, mc_label),
        ) in enumerate(SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + cont_a[:cap_length] + [clf_token]
            lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + cont_b[:cap_length] + [clf_token]
            lowerCAmelCase__ : Optional[Any] = with_cont_a
            lowerCAmelCase__ : List[str] = with_cont_b
            lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
            lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
            lowerCAmelCase__ : Tuple = with_cont_a
            lowerCAmelCase__ : Optional[int] = with_cont_b
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
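# Shape sketch for the tensors built above (n_batch and input_len are the
# hypothetical batch and sequence sizes): each story is paired with two
# candidate endings, so every tensor carries a choice dimension of size 2.
#   input_ids:    (n_batch, 2, input_len)  tokens for [start] story [delim] ending [clf]
#   mc_token_ids: (n_batch, 2)             position of the [clf] token per choice
#   lm_labels:    (n_batch, 2, input_len)  LM targets, -100 where loss is masked
#   mc_labels:    (n_batch,)               index of the correct ending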
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
    lowerCAmelCase__ : Tuple = max(
        len(story[:max_length] ) + max(len(cont_a[:max_length] ) , len(cont_b[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont_a, cont_b, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
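# The optimizer setup above exempts biases and LayerNorm weights from weight
# decay by matching parameter-name substrings. A minimal standalone sketch of
# that pattern (the toy model and hyperparameters are illustrative only, not
# the script's actual configuration):
def _sketch_grouped_optimizer():
    import torch

    toy_model = torch.nn.Linear(8, 8)
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    grouped_parameters = [
        {
            "params": [p for n, p in toy_model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,  # decayed group (here: the weight matrix)
        },
        {
            "params": [p for n, p in toy_model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,  # exempt group (here: the bias)
        },
    ]
    return torch.optim.AdamW(grouped_parameters, lr=6.25e-5)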
| 307
| 1
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 307
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
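# Illustrative use of the helper above (the package name is just an example):
# it checks one pinned dependency on demand instead of at import time, raising
# if the installed version violates the pin recorded in the version table.
dep_version_check("tqdm")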
| 307
| 1
|
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
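# The same gradient-descent update on synthetic data, as a compact sketch that
# makes the update rule explicit (all names below are local to this sketch):
def _sketch_logistic_fit(seed: int = 0) -> np.ndarray:
    rng = np.random.default_rng(seed)
    xs = rng.normal(size=(100, 2))
    ys = (xs[:, 0] + xs[:, 1] > 0).astype(float)
    weights = np.zeros(xs.shape[1])
    for _ in range(1_000):
        probs = 1 / (1 + np.exp(-(xs @ weights)))  # sigmoid of the linear score
        grad = xs.T @ (probs - ys) / ys.size       # gradient of mean cross-entropy
        weights -= 0.1 * grad                      # step with learning rate 0.1
    return weights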
| 307
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
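# All the tests above share one denoising recipe; condensed, it looks like the
# sketch below. A zero tensor stands in for a trained epsilon-predicting UNet,
# and torchsde must be installed for this scheduler to run at all:
def _sketch_denoise_loop():
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample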
| 307
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _lowerCamelCase ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = XLMRobertaTokenizer(a , keep_accents=a )
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : str = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : int = tempfile.mkdtemp()
lowerCAmelCase__ : List[Any] = tokenizer_r.save_pretrained(a )
lowerCAmelCase__ : List[str] = tokenizer_p.save_pretrained(a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ : Tuple = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : List[Any] = tokenizer_r.from_pretrained(a )
lowerCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ : Tuple = tempfile.mkdtemp()
lowerCAmelCase__ : str = tokenizer_r.save_pretrained(a , legacy_format=a )
lowerCAmelCase__ : Dict = tokenizer_p.save_pretrained(a )
# Checks it save with the same files
self.assertSequenceEqual(a , a )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : Dict = tokenizer_r.from_pretrained(a )
lowerCAmelCase__ : Tuple = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : Dict = tokenizer_r.save_pretrained(a , legacy_format=a )
lowerCAmelCase__ : Dict = tokenizer_p.save_pretrained(a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(a )
lowerCAmelCase__ : List[str] = tokenizer_p.from_pretrained(a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a , a ) )
shutil.rmtree(a )
@cached_property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a , f.name )
lowerCAmelCase__ : Any = XLMRobertaTokenizer(f.name , keep_accents=a )
lowerCAmelCase__ : Optional[int] = pickle.dumps(a )
pickle.loads(a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : List[str] = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase__ : List[str] = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ : str = tokenizer.tokenize(a )
lowerCAmelCase__ : Dict = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Any = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Optional[Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Dict = self.get_rust_tokenizer()
lowerCAmelCase__ : List[str] = tokenizer.encode(a )
lowerCAmelCase__ : List[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'Hello World!'
lowerCAmelCase__ : Optional[Any] = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase__ : Optional[int] = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = {'input_ids': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
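# The integration checks above boil down to an encode/decode round trip; in
# miniature (this downloads the real vocabulary from the Hub, so network
# access is assumed):
def _sketch_roundtrip():
    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tok.encode("Hello World!")  # e.g. [0, 35378, 6661, 38, 2]
    return tok.decode(ids, skip_special_tokens=True)  # -> "Hello World!"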
| 307
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
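# Typical use of the helpers above: poll for one logical key press and compare
# it against KEYMAP (a sketch; it needs a real terminal, since raw mode is used).
def _sketch_wait_for_vertical_arrow() -> str:
    while True:
        key = get_character()
        if isinstance(key, str) and ord(key) in (KEYMAP["up"], KEYMAP["down"]):
            return "up" if ord(key) == KEYMAP["up"] else "down"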
| 307
| 1
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
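# The forward pass above is masked mean pooling followed by a linear projection;
# the pooling step in isolation (shapes and values are illustrative):
def _sketch_masked_mean_pool():
    embs = torch.randn(2, 5, 1024)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
    summed = (embs * attention_mask.unsqueeze(2)).sum(dim=1)  # zero out padding, sum over tokens
    return summed / attention_mask.sum(dim=1)[:, None]  # divide by real token counts -> (batch, hidden)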
| 307
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
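    # Round-trip example for the two helpers above:
    # b"Hello" -> "48656C6C6F" -> b"Hello"
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"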
| 307
| 1
|
import os
def solution():
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 307
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
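    # The backtracking above enumerates the same sets, in the same
    # lexicographic order, as the standard library:
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]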
| 307
| 1
|
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : List[str] = int(SCREAMING_SNAKE_CASE_ )
assert noofclusters < len(SCREAMING_SNAKE_CASE_ )
# Find out the dimensionality
lowerCAmelCase__ : Optional[int] = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCAmelCase__ : str = list(range(len(SCREAMING_SNAKE_CASE_ ) ) )
shuffle(SCREAMING_SNAKE_CASE_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCAmelCase__ : str = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCAmelCase__ : Dict = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCAmelCase__ : List[Any] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCAmelCase__ : Optional[int] = tf.placeholder('float64' , [dim] )
lowerCAmelCase__ : Optional[Any] = []
for centroid in centroids:
cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCAmelCase__ : List[str] = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCAmelCase__ : Tuple = tf.placeholder('int32' )
lowerCAmelCase__ : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCAmelCase__ : int = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCAmelCase__ : Union[str, Any] = tf.reduce_mean(SCREAMING_SNAKE_CASE_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCAmelCase__ : Union[str, Any] = tf.placeholder('float' , [dim] )
lowerCAmelCase__ : Any = tf.placeholder('float' , [dim] )
lowerCAmelCase__ : Optional[int] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCAmelCase__ : Optional[int] = tf.placeholder('float' , [noofclusters] )
lowerCAmelCase__ : List[Any] = tf.argmin(SCREAMING_SNAKE_CASE_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCAmelCase__ : int = tf.initialize_all_variables()
# Initialize all variables
sess.run(SCREAMING_SNAKE_CASE_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCAmelCase__ : Any = 100
for _ in range(SCREAMING_SNAKE_CASE_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : Any = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCAmelCase__ : Any = [
sess.run(SCREAMING_SNAKE_CASE_ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCAmelCase__ : Union[str, Any] = sess.run(
SCREAMING_SNAKE_CASE_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(SCREAMING_SNAKE_CASE_ ):
# Collect all the vectors assigned to this cluster
lowerCAmelCase__ : Tuple = [
vectors[i]
for i in range(len(SCREAMING_SNAKE_CASE_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCAmelCase__ : Dict = sess.run(
SCREAMING_SNAKE_CASE_ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCAmelCase__ : List[Any] = sess.run(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sess.run(SCREAMING_SNAKE_CASE_ )
return centroids, assignments
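# The TF1 graph above implements plain Lloyd's algorithm; the same E/M steps
# fit in a few lines of NumPy. A sketch -- the random initialization and fixed
# iteration count mirror the simplifications made above:
import numpy as np


def _sketch_kmeans(vectors: np.ndarray, noofclusters: int, iterations: int = 100):
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its cluster
        for c in range(noofclusters):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments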
| 307
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
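# The save/load round trip these tests depend on, in miniature:
def _sketch_config_roundtrip() -> bool:
    config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    return loaded.do_sample is True and loaded.temperature == 0.7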
| 307
| 1
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : int ):
'''simple docstring'''
return str(self )
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
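    # Numeric cross-check of the Sherman-Morrison identity used above against a
    # direct inverse (numpy is used only for this check; a is the identity and
    # u, v match test1):
    import numpy as np

    _a_inv = np.eye(3)
    _u = np.array([[1.0], [2.0], [-3.0]])
    _v = np.array([[4.0], [-2.0], [5.0]])
    _direct = np.linalg.inv(np.eye(3) + _u @ _v.T)
    _factor = 1.0 + (_v.T @ _a_inv @ _u).item()
    _formula = _a_inv - (_a_inv @ _u) @ (_v.T @ _a_inv) / _factor
    assert np.allclose(_direct, _formula)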
| 307
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , device , seed=0 , pil_image=True ):
'''simple docstring'''
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array([0.9_9_9_7, 0.0_0_0_2, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_6_9, 0.0_0_2_3, 0.9_9_9_7, 0.9_9_6_9, 0.9_9_7_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array([0.9_9_9_7, 0.9_9_8_9, 0.0_0_0_8, 0.0_0_2_1, 0.9_9_6_0, 0.0_0_1_8, 0.0_0_1_4, 0.0_0_0_2, 0.9_9_3_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
device = torch.device('cpu' )
class DummyScheduler:
init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# The check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
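# Minimal illustrative usage of the pipeline exercised above (a sketch based only
# on calls that appear in this test, not an authoritative recipe):
#   pipe = UnCLIPImageVariationPipeline.from_pretrained(
#       'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
#   pipe = pipe.to(torch_device )
#   out = pipe(input_image , generator=torch.Generator(device='cpu' ).manual_seed(0 ) , output_type='np' )
#   image = out.images[0]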
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
|
def stooge_sort(arr: list ) -> list:
stooge(arr , 0 , len(arr ) - 1 )
return arr
def stooge(arr: list , i: int , h: int ) -> None:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
t = (h - i + 1) // 3
# Recursively sort the first 2/3 of the elements
stooge(arr , i , (h - t) )
# Recursively sort the last 2/3 of the elements
stooge(arr , i + t , h )
# Recursively sort the first 2/3 of the elements again
stooge(arr , i , (h - t) )
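# Note (not part of the original file): stooge sort runs in O(n^(log 3 / log 1.5)),
# roughly O(n^2.71), so it is a teaching curiosity rather than a practical sort.
# Illustrative check: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].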
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 307
| 1
|
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
bases = range(1 , max_base )
powers = range(1 , max_power )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
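# Why the defaults suffice (explanatory note, not part of the original file):
# b**p has floor(p * log10(b)) + 1 digits, which equals p only while
# p * (1 - log10(b)) < 1. A base of 10 or more always gains extra digits, and for
# the largest usable base, b = 9, the bound gives p <= 21; so bases 1..9 and
# powers 1..21 cover every n-digit positive integer that is also an n-th power
# (Project Euler problem 63).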
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 307
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
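# Pattern note (explanatory, not part of the original file): each try/except block
# above probes one optional backend and registers the matching submodule in
# _import_structure only when that backend is installed, so importing the package
# still succeeds without sentencepiece, the speech backend, TensorFlow, or PyTorch.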
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""_T""")
class A__ ( Generic[_T] ):
def __init__( self : Optional[int] , iterable : Iterable[_T] | None = None ):
'''simple docstring'''
self._stack1 : list[_T] = list(iterable or [] )
self._stack2 : list[_T] = []
def __len__( self : int ):
'''simple docstring'''
return len(self._stack1 ) + len(self._stack2 )
def __repr__( self : List[str] ):
'''simple docstring'''
return f'''Queue({tuple(self._stack2[::-1] + self._stack1 )})'''
def _lowerCamelCase ( self : List[str] , item : _T ):
'''simple docstring'''
self._stack1.append(item )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
stack1_pop = self._stack1.pop
stack2_append = self._stack2.append
if not self._stack2:
while self._stack1:
stack2_append(stack1_pop() )
if not self._stack2:
raise IndexError('Queue is empty' )
return self._stack2.pop()
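# How the two-stack queue works (explanatory note, not part of the original file):
# put() pushes onto the inbox stack (_stack1); get() pops from the outbox stack
# (_stack2) and refills it from the inbox, reversing the element order, only when
# the outbox is empty. Each element is moved at most once, so both operations run
# in amortized O(1) time.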
if __name__ == "__main__":
from doctest import testmod
testmod()
| 307
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
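# For reference, a minimal flatten_dict could look like the sketch below (an
# illustrative assumption, not the actual transformers implementation):
def _flatten_dict_sketch(d , parent_key='' , sep='.' ):
items = {}
for k, v in d.items():
key = f'''{parent_key}{sep}{k}''' if parent_key else k
if isinstance(v , dict ):
items.update(_flatten_dict_sketch(v , key , sep ) )
else:
items[key] = v
return items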
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
x = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
x = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
| 307
| 1
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ) -> list:
expected_row_ids_and_row_dicts = []
for part_id in partition_order:
lowerCAmelCase__ : Tuple = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(SCREAMING_SNAKE_CASE_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
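# The helper above materializes each requested partition via a WHERE filter on
# SPARK_PARTITION_ID(), pairing every row with an id of the form
# '{partition}_{row index}' so the tests below can compare iteration order exactly.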
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> Any:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(100 ).repartition(1 )
spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> Tuple:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(10 ).repartition(2 )
partition_order = [1, 0]
generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(10 ).repartition(1 )
it = SparkExamplesIterable(df )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(it ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
generator_mock.shuffle = lambda x : x.reverse()
expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(shuffled_it ):
expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
for i, (row_id, row_dict) in enumerate(shard_it_a ):
expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
for i, (row_id, row_dict) in enumerate(shard_it_a ):
expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> str:
spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
df = spark.range(100 ).repartition(1 )
spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 307
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 1
|
import math
def main() -> None:
message = input('Enter message: ' )
key = int(input(F'''Enter key [2-{len(message ) - 1}]: ''' ) )
mode = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
text = encrypt_message(key , message )
elif mode.lower().startswith('d' ):
text = decrypt_message(key , message )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'''Output:\n{text + "|"}''' )
def encrypt_message(key: int , message: str ) -> str:
cipher_text = [''] * key
for col in range(key ):
pointer = col
while pointer < len(message ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
num_cols = math.ceil(len(message ) / key )
num_rows = key
num_shaded_boxes = (num_cols * num_rows) - len(message )
plain_text = [''] * num_cols
col = 0
row = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
col = 0
row += 1
return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 307
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 307
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
subparsers = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=subparsers )
env_command_parser(subparsers=subparsers )
launch_command_parser(subparsers=subparsers )
tpu_command_parser(subparsers=subparsers )
test_command_parser(subparsers=subparsers )
# Let's go
args = parser.parse_args()
if not hasattr(args , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
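# Illustrative invocations routed through this dispatcher (examples, not part of
# the original file):
#   accelerate config                      # interactive configuration wizard
#   accelerate env                         # print environment info for bug reports
#   accelerate launch train.py --num_processes 2
#   accelerate test                        # sanity-check the saved configuration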
| 307
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
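# Pattern note (explanatory, not part of the original file): each class below is a
# placeholder for an ONNX-enabled pipeline object. Instantiating one, or calling
# either classmethod (obfuscated here as `_lowerCamelCase`; they correspond to
# constructors such as from_config / from_pretrained in the real library), raises
# an informative error via requires_backends listing the missing packages instead
# of an opaque ImportError at import time.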
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 307
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
class A__ ( Generic[T] ):
def __init__( self : Dict , directed : bool = True ):
'''simple docstring'''
self.adj_list : dict[T, list[T]] = {} # dictionary of lists
self.directed = directed
def _lowerCamelCase ( self : str , source_vertex : T , destination_vertex : T ):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both the source and destination vertices are present in the adjacency
# list, add the destination vertex to the source vertex's list of adjacent
# vertices and add the source vertex to the destination vertex's list of
# adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex].append(source_vertex )
# if only the source vertex is present in the adjacency list, add the
# destination vertex to the source vertex's list of adjacent vertices, then
# create a new vertex with the destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex] = [source_vertex]
# if only the destination vertex is present in the adjacency list, add the
# source vertex to the destination vertex's list of adjacent vertices, then
# create a new vertex with the source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(source_vertex )
self.adj_list[source_vertex] = [destination_vertex]
# if neither vertex is present in the adjacency list, create a new vertex
# with the source vertex as key holding a list containing the destination
# vertex, and likewise a new vertex with the destination vertex as key
# holding a list containing the source vertex.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both the source and destination vertices are present in the adjacency
# list, add the destination vertex to the source vertex's list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
# if only the source vertex is present in the adjacency list, add the
# destination vertex to the source vertex's list of adjacent vertices and
# create a new vertex with the destination vertex as key, which has no
# adjacent vertices.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex )
self.adj_list[destination_vertex] = []
# if only the destination vertex is present in the adjacency list, create a
# new vertex with the source vertex as key and assign a list containing the
# destination vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[source_vertex] = [destination_vertex]
# if neither vertex is present in the adjacency list, create a new vertex
# with the source vertex as key holding a list containing the destination
# vertex, and a new vertex with the destination vertex as key with no
# adjacent vertices.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = []
return self
def __repr__( self : str ):
'''simple docstring'''
return pformat(self.adj_list )
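# Illustrative usage (hypothetical, not part of the original file); the edge-adding
# method is obfuscated above as `_lowerCamelCase`:
#   g = A__(directed=False )
#   g._lowerCamelCase(1 , 2 )._lowerCamelCase(2 , 3 )  # chaining works: the method returns self
#   print(g )  # {1: [2], 2: [1, 3], 3: [2]}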
| 307
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
| 307
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
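# Helper (used below as `to_atuple`): iterables pass through unchanged, scalars are duplicated into a pair.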
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
        return SCREAMING_SNAKE_CASE_
    return (SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
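        # Round-trip through save_pretrained/from_pretrained; reloaded outputs must match within 1e-5.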
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
| 307
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 307
| 1
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
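    # Choose a Parquet writer batch size: image, audio, and binary columns get smaller row groups;
    # returns None (use the default) when no such feature is present.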
lowerCAmelCase__ : str = np.inf
def set_batch_size(SCREAMING_SNAKE_CASE_ ) -> None:
nonlocal batch_size
        if isinstance(SCREAMING_SNAKE_CASE_ , Image ):
            lowerCAmelCase__ : Any = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(SCREAMING_SNAKE_CASE_ , Audio ):
            lowerCAmelCase__ : List[str] = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(SCREAMING_SNAKE_CASE_ , Value ) and SCREAMING_SNAKE_CASE_.dtype == "binary":
            lowerCAmelCase__ : Optional[int] = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(SCREAMING_SNAKE_CASE_ , set_batch_size )
return None if batch_size is np.inf else batch_size
class A__ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Optional[Any] , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[int] = None , **a : Tuple , ):
'''simple docstring'''
super().__init__(
__a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , )
lowerCAmelCase__ : Any = path_or_paths if isinstance(__a , __a ) else {self.split: path_or_paths}
lowerCAmelCase__ : Tuple = _PACKAGED_DATASETS_MODULES['parquet'][1]
lowerCAmelCase__ : Union[str, Any] = Parquet(
cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : str = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , )
lowerCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
class A__ :
def __init__( self : List[Any] , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = dataset
lowerCAmelCase__ : Optional[Any] = path_or_buf
lowerCAmelCase__ : List[Any] = batch_size or get_writer_batch_size(dataset.features )
lowerCAmelCase__ : Tuple = parquet_writer_kwargs
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
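        # path_or_buf may be a filesystem path (opened here) or an already-open binary file object.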
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
lowerCAmelCase__ : Union[str, Any] = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs )
else:
lowerCAmelCase__ : Optional[Any] = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs )
return written
def _lowerCamelCase ( self : List[str] , a : BinaryIO , a : int , **a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Any = 0
lowerCAmelCase__ : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , __a )
lowerCAmelCase__ : List[str] = self.dataset.features.arrow_schema
lowerCAmelCase__ : Tuple = pq.ParquetWriter(__a , schema=__a , **__a )
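        # Stream the dataset into the Parquet file one slice at a time, accumulating the bytes written.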
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
lowerCAmelCase__ : Dict = query_table(
table=self.dataset._data , key=slice(__a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__a )
written += batch.nbytes
writer.close()
return written
| 350
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
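        # When the vocabulary is split into clusters, the head softmax gets one extra logit per tail cluster.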
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
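        # With labels, returns the per-token negative log-likelihood; otherwise the full log-probabilities.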
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
| 307
| 0
|
from datetime import datetime as dt
import os
from github import Github
lowerCamelCase__ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
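    # Close issues that are >30 days old and quiet for >7 days after the bot's last ping; otherwise post a
    # stale warning after 23 quiet days. Issues carrying an exempt label are skipped entirely.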
lowerCAmelCase__ : Any = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase__ : Dict = g.get_repo('huggingface/transformers' )
lowerCAmelCase__ : List[str] = repo.get_issues(state='open' )
for issue in open_issues:
        lowerCAmelCase__ : Dict = sorted([comment for comment in issue.get_comments()] , key=lambda SCREAMING_SNAKE_CASE_ : SCREAMING_SNAKE_CASE_.created_at , reverse=True )
        lowerCAmelCase__ : Any = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 351
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 307
| 0
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , a : Any , a : Optional[Any]=13 , a : Optional[int]=7 , a : int=True , a : Dict=True , a : str=True , a : Optional[int]=True , a : List[str]=99 , a : List[str]=32 , a : int=5 , a : Dict=4 , a : Dict=37 , a : str="gelu" , a : Tuple=0.1 , a : Optional[Any]=0.1 , a : Any=512 , a : Tuple=16 , a : Union[str, Any]=2 , a : str=0.0_2 , a : List[Any]=4 , ):
'''simple docstring'''
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : List[Any] = seq_length
lowerCAmelCase__ : Optional[int] = is_training
lowerCAmelCase__ : List[Any] = use_attention_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : Optional[Any] = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : int = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Optional[int] = num_choices
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Optional[int] = None
if self.use_attention_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class A__ ( _UpperCAmelCase , unittest.TestCase ):
lowercase = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class_name.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase__ : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase__ : Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowerCAmelCase__ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowerCAmelCase__ : int = (1, 11, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
lowerCAmelCase__ : List[str] = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
| 352
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 353
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 0
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class A__ ( a__ ):
lowercase = None
lowercase = None
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'feature_size' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'sampling_rate' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'padding_value' ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase__ : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCamelCase ) == len(_lowerCamelCase ) for x, y in zip(_lowerCamelCase , processed_features[input_name] ) ) )
lowerCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCAmelCase__ : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : int = feat_extract.model_input_names[0]
lowerCAmelCase__ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCAmelCase__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCAmelCase__ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict=False ):
'''simple docstring'''
def _inputs_have_equal_length(a : str ):
lowerCAmelCase__ : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(a : str , a : Tuple ):
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ):
if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Dict = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ : List[str] = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ : Tuple = self.feat_extract_tester.batch_size
lowerCAmelCase__ : Any = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ : List[str] = feat_extract.pad(_lowerCamelCase , padding=_lowerCamelCase )
lowerCAmelCase__ : str = input_a[input_name]
lowerCAmelCase__ : Union[str, Any] = feat_extract.pad(_lowerCamelCase , padding='longest' )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding='max_length' )[input_name]
lowerCAmelCase__ : List[str] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=_lowerCamelCase , return_tensors='np' )
lowerCAmelCase__ : List[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : List[str] = feat_extract.pad(_lowerCamelCase , pad_to_multiple_of=10 )
lowerCAmelCase__ : int = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='longest' , pad_to_multiple_of=10 )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(
_lowerCamelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_lowerCamelCase )
lowerCAmelCase__ : Tuple = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(
_lowerCamelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=_lowerCamelCase , return_tensors='np' , )
lowerCAmelCase__ : List[str] = input_a[input_name]
self.assertTrue(all(len(_lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
lowerCAmelCase__ : Union[str, Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def _lowerCamelCase ( self : Any , a : str=False ):
'''simple docstring'''
def _inputs_have_equal_length(a : str ):
lowerCAmelCase__ : Dict = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(a : Tuple , a : int ):
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ):
if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase )
lowerCAmelCase__ : List[str] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ : List[str] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_lowerCamelCase )
lowerCAmelCase__ : Any = input_a[input_name]
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ : List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
# truncate to smallest with np
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_lowerCamelCase , )
lowerCAmelCase__ : str = input_a[input_name]
lowerCAmelCase__ : str = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# Since truncation forces padding to be smaller than the longest input,
# the function can't return an `np.ndarray` and has to return a list.
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
# truncate to middle
lowerCAmelCase__ : Optional[Any] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase , return_tensors='np' , )
lowerCAmelCase__ : str = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
# Since truncation forces padding to be smaller than the longest input,
# the function can't return an `np.ndarray` and has to return a list.
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , truncation=_lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding='longest' , truncation=_lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding='longest' , truncation=_lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding='max_length' , truncation=_lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : str = 12
lowerCAmelCase__ : str = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , )
lowerCAmelCase__ : Tuple = input_a[input_name]
lowerCAmelCase__ : str = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , )
lowerCAmelCase__ : Any = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ : Tuple = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self._check_padding(numpify=_lowerCamelCase )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self._check_padding(numpify=_lowerCamelCase )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_lowerCamelCase )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
self._check_truncation(numpify=_lowerCamelCase )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : str = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Tuple = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.feat_extract_dict
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Optional[Any] = self.feature_extraction_class(**_lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : List[str] = [len(_lowerCamelCase ) for x in speech_inputs]
lowerCAmelCase__ : int = feat_extract.model_input_names[0]
lowerCAmelCase__ : Tuple = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Any = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCamelCase )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.feat_extract_dict
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[str] = self.feature_extraction_class(**_lowerCamelCase )
lowerCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Dict = [len(_lowerCamelCase ) for x in speech_inputs]
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : str = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Optional[Any] = min(_lowerCamelCase )
lowerCAmelCase__ : int = feat_extract.pad(
_lowerCamelCase , padding='max_length' , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
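# --- Editor's sketch (illustration only) ---
# What the `pad(..., padding='longest')` calls exercised above are assumed to
# produce, written out with plain numpy: every sequence is right-padded with
# `padding_value` up to the longest length, and the attention mask marks real
# positions with 1 and padding with 0, so mask.sum(-1) recovers the lengths.
import numpy as np

def _sketch_pad_longest(sequences, padding_value=0.0):
    max_len = max(len(seq) for seq in sequences)
    padded = np.full((len(sequences), max_len), padding_value, dtype=np.float32)
    mask = np.zeros((len(sequences), max_len), dtype=np.int32)
    for row, seq in enumerate(sequences):
        padded[row, : len(seq)] = seq
        mask[row, : len(seq)] = 1
    return padded, mask

_batch, _mask = _sketch_pad_longest([[0.1, 0.2], [0.3]])
assert _mask.sum(-1).tolist() == [2, 1]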
| 354
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
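# --- Editor's sketch (illustration only) ---
# The cosine ("squaredcos_cap_v2") schedule above sets
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2. A standalone
# numeric check for T = 4 steps shows the betas grow monotonically towards 1:
import math

def _sketch_alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

_T, _max_beta = 4, 0.999
_sketch_betas = [
    min(1 - _sketch_alpha_bar((i + 1) / _T) / _sketch_alpha_bar(i / _T), _max_beta)
    for i in range(_T)
]
assert all(b2 > b1 for b1, b2 in zip(_sketch_betas, _sketch_betas[1:]))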
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
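# --- Editor's sketch (illustration only) ---
# Two relationships the scheduler above relies on, restated standalone. First,
# the Karras et al. (2022) schedule in `_convert_to_karras` interpolates from
# sigma_max to sigma_min in rho-th-root space. Second, in this sigma
# parameterisation `add_noise` forms x_t = x_0 + sigma_t * noise, and
# `scale_model_input` rescales by 1 / sqrt(sigma^2 + 1) so the network sees
# roughly unit-variance inputs when x_0 itself has unit variance.
import numpy as np

_rho, _sigma_min, _sigma_max = 7.0, 0.1, 10.0
_ramp = np.linspace(0, 1, 5)
_sigmas = (
    _sigma_max ** (1 / _rho) + _ramp * (_sigma_min ** (1 / _rho) - _sigma_max ** (1 / _rho))
) ** _rho
assert np.isclose(_sigmas[0], _sigma_max) and np.isclose(_sigmas[-1], _sigma_min)

_x0, _noise = np.zeros(3), np.random.randn(3)
_xt = _x0 + _noise * _sigmas[2]                    # add_noise
_model_in = _xt / ((_sigmas[2] ** 2 + 1) ** 0.5)   # scale_model_input
assert _model_in.shape == _xt.shape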
| 307
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCamelCase__ = TypeVar("""T""")
lowerCamelCase__ = Union[List[T], Tuple[T, ...]]
lowerCamelCase__ = Union[T, List[T], Dict[str, T]]
lowerCamelCase__ = Union[str, bytes, os.PathLike]
| 355
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
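# --- Editor's sketch (illustration only) ---
# The logit shapes asserted above follow from the dual-encoder score matrix:
# every image embedding is scored against every text embedding, so
# logits_per_image is (num_images, num_texts) and logits_per_text is its
# transpose. With hypothetical embeddings of projection dim 4:
import numpy as np

_img = np.random.randn(1, 4)   # 1 image
_txt = np.random.randn(2, 4)   # 2 candidate captions
_logits_per_image = _img @ _txt.T
_logits_per_text = _logits_per_image.T
assert _logits_per_image.shape == (1, 2) and _logits_per_text.shape == (2, 1)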
| 307
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase__ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase__ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : List[str] = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
lowerCAmelCase__ : str = bs[:]
lowerCAmelCase__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
lowerCAmelCase__ : Optional[Any] = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
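# --- Editor's sketch (illustration only) ---
# The function above builds the standard GPT-2 byte-to-unicode table: printable
# bytes map to themselves and the remaining bytes are shifted into the 256+
# code-point range, so byte-level BPE never sees raw whitespace or control
# bytes. A self-contained restatement plus a usage example (the leading space
# of " hi" becomes the familiar 'Ġ' marker):
def _sketch_bytes_to_unicode() -> dict:
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

_b2u = _sketch_bytes_to_unicode()
assert "".join(_b2u[b] for b in " hi".encode("utf-8")) == "Ġhi"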
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[int] = set()
lowerCAmelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ : Optional[Any] = char
return pairs
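# --- Editor's sketch (illustration only) ---
# `get_pairs` returns the set of adjacent symbol bigrams in a word, which is
# what the BPE loop in the tokenizer below repeatedly ranks and merges:
def _sketch_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

assert _sketch_get_pairs(("h", "e", "l", "l", "o")) == {
    ("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")
}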
class A__ ( lowerCamelCase__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , a : Optional[int] , a : List[Any] , a : Any="replace" , a : Union[str, Any]="<s>" , a : List[str]="</s>" , a : List[str]="</s>" , a : Tuple="<s>" , a : Optional[int]="<unk>" , a : int="<pad>" , a : Optional[Any]="<mask>" , a : Dict=False , **a : List[Any] , ):
'''simple docstring'''
lowerCAmelCase__ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowerCAmelCase__ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowerCAmelCase__ : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowerCAmelCase__ : Optional[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding='utf-8' ) as vocab_handle:
lowerCAmelCase__ : Any = json.load(a )
lowerCAmelCase__ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase__ : Optional[int] = bytes_to_unicode()
lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding='utf-8' ) as merges_handle:
lowerCAmelCase__ : str = merges_handle.read().split('\n' )[1:-1]
lowerCAmelCase__ : Any = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ : Dict = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ : Union[str, Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self : List[str] , a : Union[str, Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : Optional[Any] = tuple(a )
lowerCAmelCase__ : str = get_pairs(a )
if not pairs:
return token
while True:
lowerCAmelCase__ : Optional[int] = min(a , key=lambda a : self.bpe_ranks.get(a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = bigram
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : str = 0
while i < len(a ):
try:
lowerCAmelCase__ : Optional[Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : Union[str, Any] = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : List[Any] = tuple(a )
lowerCAmelCase__ : Any = new_word
if len(a ) == 1:
break
else:
lowerCAmelCase__ : Dict = get_pairs(a )
lowerCAmelCase__ : Dict = ' '.join(a )
lowerCAmelCase__ : Optional[Any] = word
return word
def _lowerCamelCase ( self : List[str] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = []
for token in re.findall(self.pat , a ):
lowerCAmelCase__ : str = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) )
return bpe_tokens
def _lowerCamelCase ( self : Tuple , a : str ):
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self : Any , a : Optional[int] ):
'''simple docstring'''
return self.decoder.get(a )
def _lowerCamelCase ( self : Dict , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = ''.join(a )
lowerCAmelCase__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def _lowerCamelCase ( self : List[Any] , a : str , a : Dict = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase__ : str = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + '\n' )
lowerCAmelCase__ : int = 0
with open(a , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCAmelCase__ : str = token_index
writer.write(' '.join(a ) + '\n' )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : str = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , a : List[Any] , a : str = None , a : List[Any] = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : Optional[int] , a : str , a : Dict = None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like any other word."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
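    # A quick sketch of the special-token layout the methods above produce, assuming
    # `tok` is an instance of this tokenizer (the ids below are illustrative placeholders):
    #   tok.build_inputs_with_special_tokens([10, 11])            -> [cls, 10, 11, sep]
    #   tok.build_inputs_with_special_tokens([10, 11], [12, 13])  -> [cls, 10, 11, sep, sep, 12, 13, sep]
    #   tok.get_special_tokens_mask([10, 11], [12, 13])           -> [1, 0, 0, 1, 1, 0, 0, 1]
    #   tok.create_token_type_ids_from_sequences([10, 11])        -> [0, 0, 0, 0]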
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
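# For example (illustrative values):
#   accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))  -> 2  (both rows correct)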
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the header line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
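# Each CSV row is assumed to follow the ROCStories cloze-test layout:
#   story_id, sentence_1..sentence_4, continuation_1, continuation_2, right_ending (1 or 2),
# so the stored label is the zero-based index of the correct continuation.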
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label)
    into Transformer inputs of shape (n_batch, n_alternative, length)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1  # position of the classification token
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
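# Shapes of the resulting tensors for a dataset of N examples:
#   input_ids    (N, 2, input_len)  -- one row per candidate continuation
#   mc_token_ids (N, 2)             -- index of the [classify] token in each row
#   lm_labels    (N, 2, input_len)  -- same tokens; the -100 fill value is ignored by the LM loss
#   mc_labels    (N,)               -- which continuation is correct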
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps', default=-1, type=int,
        help='If > 0: set total number of training steps to perform. Overrides num_train_epochs.')
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1,
        help='Number of update steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object (a string, an int, or a nested list/tuple of them)."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
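    # e.g. tokenize_and_encode(("hello world", 3)) -> ([<ids for "hello world">], 3)  (illustrative)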
    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
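    # The +3 above reserves room for the three special tokens added to every example:
    # [start] story [delimiter] continuation [classify].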
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        # Weight decay is applied to every parameter except biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Combined objective: language-modeling loss (weighted by lm_coef) plus multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 307
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A__ ( unittest.TestCase ):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than the block size are padded with the pad value."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences whose length equals the block size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than the block size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Stories without `@highlight` markers yield no summary lines."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields neither story lines nor summary lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """A final period is added to story sentences and highlights that lack one."""
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
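    # As the cases above show, build_mask keeps every real token (mask value 1) and
    # zeroes out the trailing run of padding tokens, even when the padding id (1 here)
    # is an otherwise plausible token value.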
    def test_compute_token_type_ids(self):
        """Token type ids flip at every separator token within each sequence."""
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
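# Example usage (illustrative): enforce the pinned version of a single dependency at runtime.
#   dep_version_check("tqdm")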
| 307
| 0