code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
snake_case : Tuple = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = ['''input_features''', '''attention_mask''']
def __init__( self :int ,__snake_case :Tuple=80 ,__snake_case :Tuple=1_60_00 ,__snake_case :str=0.0 ,__snake_case :Tuple=10 ,__snake_case :Dict=25 ,__snake_case :Any="hamming_window" ,__snake_case :int=3_27_68.0 ,__snake_case :List[str]=0.97 ,__snake_case :int=1.0 ,__snake_case :int=True ,__snake_case :Optional[Any]=True ,__snake_case :List[str]=False ,**__snake_case :List[str] ,) -> Tuple:
super().__init__(feature_size=__snake_case ,sampling_rate=__snake_case ,padding_value=__snake_case ,**__snake_case )
a__ = feature_size
a__ = sampling_rate
a__ = padding_value
a__ = hop_length
a__ = win_length
a__ = frame_signal_scale
a__ = preemphasis_coeff
a__ = mel_floor
a__ = normalize_means
a__ = normalize_vars
a__ = win_function
a__ = return_attention_mask
a__ = win_length * sampling_rate // 10_00
a__ = hop_length * sampling_rate // 10_00
a__ = optimal_fft_length(self.sample_size )
a__ = (self.n_fft // 2) + 1
def lowerCamelCase__( self :int ,__snake_case :np.array ) -> np.ndarray:
if self.win_function == "hamming_window":
a__ = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=__snake_case )
else:
a__ = window_function(window_length=self.sample_size ,name=self.win_function )
a__ = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
a__ = spectrogram(
one_waveform * self.frame_signal_scale ,window=__snake_case ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=__snake_case ,preemphasis=self.preemphasis_coeff ,mel_filters=__snake_case ,mel_floor=self.mel_floor ,log_mel='log' ,)
return msfc_features.T
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Any ) -> int:
# make sure we normalize float32 arrays
if self.normalize_means:
a__ = x[:input_length].mean(axis=0 )
a__ = np.subtract(__snake_case ,__snake_case )
if self.normalize_vars:
a__ = x[:input_length].std(axis=0 )
a__ = np.divide(__snake_case ,__snake_case )
if input_length < x.shape[0]:
a__ = padding_value
# make sure array is in float32
a__ = x.astype(np.floataa )
return x
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[np.ndarray] ,__snake_case :Optional[np.ndarray] = None ) -> List[np.ndarray]:
a__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__snake_case ,__snake_case ,self.padding_value ) for x, n in zip(__snake_case ,__snake_case )]
def __call__( self :str ,__snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__snake_case :Union[bool, str, PaddingStrategy] = False ,__snake_case :Optional[int] = None ,__snake_case :bool = False ,__snake_case :Optional[int] = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,__snake_case :Optional[int] = None ,**__snake_case :Union[str, Any] ,) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a__ = isinstance(__snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
a__ = is_batched_numpy or (
isinstance(__snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case ,np.ndarray ):
a__ = np.asarray(__snake_case ,dtype=np.floataa )
elif isinstance(__snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ = [raw_speech]
# extract fbank features
a__ = [self._extract_mfsc_features(__snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
a__ = BatchFeature({'input_features': features} )
a__ = self.pad(
__snake_case ,padding=__snake_case ,max_length=__snake_case ,truncation=__snake_case ,pad_to_multiple_of=__snake_case ,return_attention_mask=__snake_case ,**__snake_case ,)
# make sure list is in array format
a__ = padded_inputs.get('input_features' )
if isinstance(input_features[0] ,__snake_case ):
a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for feature in input_features]
a__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
a__ = [np.asarray(__snake_case ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
a__ = (
np.array(__snake_case ,dtype=np.intaa )
if self._get_padding_strategies(__snake_case ,max_length=__snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
a__ = self.normalize(
padded_inputs['input_features'] ,attention_mask=__snake_case )
if return_tensors is not None:
a__ = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
| 240 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : str = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class snake_case_ (lowerCamelCase_ ):
    """MobileNetV2 model configuration.

    Holds the architecture hyper-parameters; the defaults appear to mirror
    the google/mobilenet_v2_1.0_224 checkpoint.
    """

    # Model-type identifier consumed by the auto-config machinery.
    UpperCAmelCase__ : str = '''mobilenet_v2'''

    def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs ):
        """Store the configuration attributes.

        BUGFIX: the obfuscated signature repeated one parameter name
        (``__snake_case``), a SyntaxError, while the body already read the
        real names and had lost the ``self.`` prefix on every assignment;
        both are restored here.  Extra keyword arguments are forwarded to
        the base configuration class.
        """
        super().__init__(**kwargs )
        # A non-positive width multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class snake_case_ (lowerCamelCase_ ):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): all three properties share one obfuscated name
    (``lowerCamelCase__``), so later definitions shadow the earlier ones on
    the class; the originals were presumably ``inputs``, ``outputs`` and
    ``atol_for_validation`` -- confirm against the upstream source.
    """

    # Minimum torch/opset version required for the ONNX export.
    UpperCAmelCase__ : Tuple = version.parse('''1.11''' )

    @property
    def lowerCamelCase__( self :Any ) -> Mapping[str, Mapping[int, str]]:
        # Single image input with a dynamic batch axis.
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def lowerCamelCase__( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
        # Output names depend on the export task.
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def lowerCamelCase__( self :Any ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-4
| 240 | 1 |
def UpperCAmelCase__ ( list_data, length = 0 ):
    """Recursive bubble sort: sort *list_data* in place and return it.

    Each pass bubbles the largest remaining element toward the end of the
    unsorted prefix; recursion stops early once a pass makes no swap.

    BUGFIX: the obfuscated signature repeated one parameter name (a
    SyntaxError) and the body referenced the undefined names
    ``_UpperCamelCase`` and ``bubble_sort``; restored from the locals the
    body already used (``list_data``, ``length``, ``swapped``).

    Args:
        list_data: mutable sequence of mutually comparable items.
        length: size of the still-unsorted prefix; 0 means the whole list.

    Returns:
        The same list object, sorted ascending.
    """
    # Treat the default 0 as "use the full list length".
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # Swap out-of-order neighbours.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # No swap means the prefix is already sorted -- stop recursing.
    return list_data if not swapped else UpperCAmelCase__(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 359 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase__ ( config_name, save_dir, **config_kwargs ):
    """Create and save a randomly initialised seq2seq model plus tokenizer.

    BUGFIX: the obfuscated signature repeated one parameter name (a
    SyntaxError) and the body referenced unbound locals (``model``);
    parameter and local names restored from the call pattern.

    Args:
        config_name: model id or path understood by ``AutoConfig``.
        save_dir: directory the fresh weights and tokenizer are written to.
        **config_kwargs: overrides forwarded to ``AutoConfig.from_pretrained``.

    Returns:
        The freshly initialised model.
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model


if __name__ == "__main__":
    # BUGFIX: the original dispatched the undefined name
    # ``save_randomly_initialized_version``; dispatch the function above.
    fire.Fire(UpperCAmelCase__)
| 158 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = logging.get_logger()
# the current default level is logging.WARNING
snake_case_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = logging.get_verbosity()
snake_case_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
snake_case_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case_ = os.getenv("TRANSFORMERS_VERBOSITY" , a__ )
snake_case_ = logging.log_levels[env_level_str]
snake_case_ = logging.get_verbosity()
self.assertEqual(
a__ , a__ , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
snake_case_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
snake_case_ = logging.logging.getLogger()
with CaptureLogger(a__ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
snake_case_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
snake_case_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , msg + "\n" )
def UpperCamelCase_( ):
    '''Smoke-test the global progress-bar toggles.

    Checks that ``disable_progress_bar``/``enable_progress_bar`` flip the
    state reported by ``huggingface_hub.utils.are_progress_bars_disabled``.
    '''
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 85 |
def triangle_number_generator():
    """Yield the triangle numbers n*(n+1)/2 for n = 1 .. 999,999."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of *n*.

    Uses trial division to factorise and the identity
    d(p1^a1 * ... * pk^ak) = (a1+1)*...*(ak+1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Whatever remains is a single prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors.

    BUGFIX: the obfuscation named all three functions ``_A`` (each shadowing
    the previous) while the bodies and the ``__main__`` guard referenced the
    intended names ``triangle_number_generator``, ``count_divisors`` and
    ``solution``; the intended names are restored.
    """
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 259 | 0 |
'''simple docstring'''
def a ( __a ) -> list[int]:
    """Exchange sort: sort *__a* in place and return it (ascending).

    For each position i, compare against every later element j and swap
    whenever the later element is smaller.

    BUGFIX: the original assigned the swap tuple to a throwaway local
    instead of writing both slots back, iterated ``range`` over the list
    itself rather than its length, and referenced the undefined names
    ``numbers``/``user_input``/``unsorted``/``exchange_sort``.
    """
    n = len(__a )
    for i in range(n ):
        for j in range(i + 1, n ):
            if __a[j] < __a[i]:
                __a[i], __a[j] = __a[j], __a[i]
    return __a


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(a(unsorted))
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = AltDiffusionPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase__ :int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCamelCase__ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCamelCase__ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
UpperCamelCase__ :Optional[Any] = CLIPTextModel(UpperCamelCase_ )
UpperCamelCase__ :int = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCamelCase__ :Dict = 77
UpperCamelCase__ :Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('''mps''' ):
UpperCamelCase__ :Dict = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase__ :List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Tuple = self.get_dummy_components()
torch.manual_seed(0 )
UpperCamelCase__ :Dict = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase__ :Union[str, Any] = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = text_encoder
UpperCamelCase__ :Any = AltDiffusionPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Dict = '''A photo of an astronaut'''
UpperCamelCase__ :Union[str, Any] = alt_pipe(**UpperCamelCase_ )
UpperCamelCase__ :Any = output.images
UpperCamelCase__ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ :List[Any] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :int = self.get_dummy_components()
UpperCamelCase__ :Union[str, Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
UpperCamelCase__ :Dict = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase__ :str = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = text_encoder
UpperCamelCase__ :Union[str, Any] = AltDiffusionPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = alt_pipe(**UpperCamelCase_ )
UpperCamelCase__ :str = output.images
UpperCamelCase__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ :List[Any] = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :str = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :int = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ :int = torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = alt_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
UpperCamelCase__ :Union[str, Any] = output.images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
UpperCamelCase__ :List[str] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :Dict = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
UpperCamelCase__ :Any = output.images
UpperCamelCase__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :int = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 219 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCamelCase ( state_dict ) -> None:
    """Delete fairseq bookkeeping keys from *state_dict*, in place.

    BUGFIX: the original popped the dict itself as both key and default
    (so nothing was ever removed) and bound the key list to a name it never
    read.  Missing keys are ignored via ``dict.pop``'s default, so this is
    safe on checkpoints lacking some entries.  (The broken ``List[str]``
    annotation, undefined in this module, is dropped.)
    """
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def UpperCamelCase ( emb ):
    """Build a bias-free ``nn.Linear`` sharing its weights with embedding *emb*.

    Used to tie an LM head to the token-embedding matrix.

    BUGFIX: the original passed the embedding module itself as both layer
    sizes and as ``bias``, and unpacked ``emb.weight.shape`` into one name
    twice; the unpacked dimensions and ``bias=False`` are restored.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    # Share (not copy) the embedding weights with the linear layer.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCamelCase ( checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False ):
    """Convert a fairseq mBART ``model.pt`` checkpoint into an HF MBart model.

    BUGFIX: the obfuscated signature repeated one parameter name (a
    SyntaxError); names are restored from the keyword arguments used by the
    CLI entry point (``hf_config_path``, ``finetuned``, ``mbart_aa``).

    Args:
        checkpoint_path: path to the fairseq checkpoint on disk.
        hf_config_path: HF config id used as the architecture template.
        finetuned: if True, tie the LM head to the shared token embeddings.
        mbart_aa: True for an mBART-50 checkpoint (switches the activation
            to relu when fine-tuned).  NOTE(review): obfuscated spelling of
            ``mbart_50`` -- kept for compatibility with the CLI below.

    Returns:
        A populated ``MBartForConditionalGeneration``.
    """
    state_dict = torch.load(checkpoint_path, map_location="""cpu""" )["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = """relu"""
    # NOTE(review): the obfuscation dropped the assignment target here; the
    # upstream conversion script stores the decoder embeddings under
    # state_dict["shared.weight"] -- confirm against the original script.
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        # Tie the LM head to the shared embedding matrix.
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCamelCase__ : str = parser.parse_args()
lowerCamelCase__ : Optional[int] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 246 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCamelCase__ : str = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase__ : Union[str, Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCamelCase ( bytestream ):
    """Read one big-endian 32-bit unsigned integer from *bytestream*.

    BUGFIX: the original referenced the undefined names ``bytestream``
    (the parameter was obfuscated away) and ``numpy.uintaa``; read four
    bytes from the argument as a big-endian uint32.
    """
    dt = numpy.dtype(numpy.uint32 ).newbyteorder(""">""" )
    return numpy.frombuffer(bytestream.read(4 ), dtype=dt )[0]
@deprecated(None, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( f ):
    """Extract MNIST images from gzipped file object *f*.

    BUGFIX: the obfuscation left the body reading undefined locals
    (``f``, ``magic``, ``rows``, ``cols``, ``num_images``, ``data``) and
    the undefined dtype ``numpy.uinta``; the original locals and
    ``numpy.uint8`` are restored.  The decorator's first argument was the
    undefined ``_lowerCAmelCase`` (``None`` upstream).

    Returns:
        A 4-D uint8 array shaped [index, rows, cols, 1].

    Raises:
        ValueError: if the file's magic number is not 2051.
    """
    print("""Extracting""", f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        # MNIST image files begin with the magic number 2051.
        magic = _readaa(bytestream )
        if magic != 2051:
            raise ValueError(
                """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf, dtype=numpy.uint8 )
        data = data.reshape(num_images, rows, cols, 1 )
        return data
@deprecated(None, """Please use tf.one_hot on tensors.""" )
def UpperCamelCase ( labels_dense, num_classes ):
    """Convert class-index labels *labels_dense* into one-hot rows.

    BUGFIX: the obfuscated signature repeated one parameter name (a
    SyntaxError) and the flat-index write lost its assignment target;
    both are restored.  The decorator's first argument was the undefined
    ``_lowerCAmelCase`` (``None`` upstream).

    Args:
        labels_dense: 1-D array of class indices.
        num_classes: width of each one-hot row.

    Returns:
        A (num_labels, num_classes) float array with a single 1 per row.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    # Set exactly one flat position per row.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( f, one_hot=False, num_classes=10 ):
    """Extract MNIST labels from gzipped file object *f*.

    BUGFIX: the obfuscated signature repeated one parameter name (a
    SyntaxError), the body read undefined locals, and the dtype was the
    undefined ``numpy.uinta``; restored to the upstream locals and
    ``numpy.uint8``.  The decorator's first argument was the undefined
    ``_lowerCAmelCase`` (``None`` upstream).

    Args:
        f: file object over a gzipped MNIST label file.
        one_hot: if True, return one-hot rows via ``_dense_to_one_hot``.
        num_classes: width of the one-hot rows.

    Raises:
        ValueError: if the file's magic number is not 2049.
    """
    print("""Extracting""", f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _readaa(bytestream )
        if magic != 2049:
            raise ValueError(
                """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf, dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels, num_classes )
        return labels
class _UpperCAmelCase :
@deprecated(
_A , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self , _A , _A , _A=False , _A=False , _A=dtypes.floataa , _A=True , _A=None , ) -> str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = random_seed.get_seed(_A )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_UpperCAmelCase : Tuple = dtypes.as_dtype(_A ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
_UpperCAmelCase : Union[str, Any] = 1_00_00
_UpperCAmelCase : Union[str, Any] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
_UpperCAmelCase : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_UpperCAmelCase : int = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_UpperCAmelCase : Dict = images.astype(numpy.floataa )
_UpperCAmelCase : Any = numpy.multiply(_A , 1.0 / 255.0 )
_UpperCAmelCase : Union[str, Any] = images
_UpperCAmelCase : List[Any] = labels
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : Optional[Any] = 0
@property
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
return self._images
@property
def __snake_case ( self ) -> Any:
'''simple docstring'''
return self._labels
@property
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._num_examples
@property
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
return self._epochs_completed
def __snake_case ( self , _A , _A=False , _A=True ) -> Tuple:
'''simple docstring'''
if fake_data:
_UpperCAmelCase : int = [1] * 7_84
_UpperCAmelCase : str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_A )],
[fake_label for _ in range(_A )],
)
_UpperCAmelCase : Tuple = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_UpperCAmelCase : str = numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
_UpperCAmelCase : List[Any] = self.images[perma]
_UpperCAmelCase : Union[str, Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_UpperCAmelCase : List[Any] = self._num_examples - start
_UpperCAmelCase : str = self._images[start : self._num_examples]
_UpperCAmelCase : List[str] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_UpperCAmelCase : str = numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
_UpperCAmelCase : Optional[int] = self.images[perm]
_UpperCAmelCase : str = self.labels[perm]
# Start next epoch
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Tuple = batch_size - rest_num_examples
_UpperCAmelCase : Union[str, Any] = self._index_in_epoch
_UpperCAmelCase : Optional[int] = self._images[start:end]
_UpperCAmelCase : str = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_UpperCAmelCase : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None, """Please write your own downloading logic.""" )
def UpperCamelCase ( filename, work_directory, source_url ):
    """Download *filename* into *work_directory* from *source_url* if absent.

    BUGFIX: the obfuscated signature repeated parameter names (a
    SyntaxError) and the body read undefined locals; restored from the
    upstream ``_maybe_download``.  The decorator's first argument was the
    undefined ``_lowerCAmelCase`` (``None`` upstream).

    Returns:
        The local path of the (possibly pre-existing) file.
    """
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory, filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url, filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print("""Successfully downloaded""", filename, size, """bytes.""" )
    return filepath
@deprecated(
    None, """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def UpperCamelCase (
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,  # fixed: the obfuscated ``dtypes.floataa`` does not exist
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST dataset.

    Args:
        train_dir: directory the gzipped IDX files are cached in.
        fake_data: if True, return three synthetic ``_DataSet``s instead.
        one_hot: one-hot encode the labels.
        dtype: dtype to cast image data to.
        reshape: flatten images from [rows, cols, depth] to [rows*cols*depth].
        validation_size: how many training examples to carve off for validation.
        seed: RNG seed forwarded to each ``_DataSet``.
        source_url: base URL of the MNIST mirror; falls back to the default
            when empty.

    Returns:
        A ``_Datasets(train=..., validation=..., test=...)`` namedtuple.

    Raises:
        ValueError: if ``validation_size`` is outside ``[0, len(train set)]``.
    """
    # NOTE(review): reconstructed — the obfuscated original declared eight
    # parameters with the same name (a SyntaxError) and lost every local
    # assignment target; names are recovered from the canonical TF tutorial.
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = """train-images-idx3-ubyte.gz"""
    train_labels_file = """train-labels-idx1-ubyte.gz"""
    test_images_file = """t10k-images-idx3-ubyte.gz"""
    test_labels_file = """t10k-labels-idx1-ubyte.gz"""
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file )
    with gfile.Open(local_file, """rb""" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file )
    with gfile.Open(local_file, """rb""" ) as f:
        train_labels = _extract_labels(f, one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file )
    with gfile.Open(local_file, """rb""" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file )
    with gfile.Open(local_file, """rb""" ) as f:
        test_labels = _extract_labels(f, one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            """Validation size should be between 0 and """
            f'''{len(train_images )}. Received: {validation_size}.'''
        )
        raise ValueError(msg )
    # Carve the requested validation split off the front of the train set.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
    train = _DataSet(train_images, train_labels, **options )
    validation = _DataSet(validation_images, validation_labels, **options )
    test = _DataSet(test_images, test_labels, **options )
    return _Datasets(train=train, validation=validation, test=test )
| 246 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """Builds tiny Llama configs and random inputs, and runs shape checks.

    NOTE(review): reconstructed — in the obfuscated original ``__init__``
    declared every parameter with the same name (a SyntaxError) and all
    methods shared one name, so later definitions shadowed earlier ones.
    Names are recovered from the attribute assignments in the original body
    and from the visible callers (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random input tensors sized by this tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small LlamaConfig mirroring the tester's hyper-parameters."""
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward pass through the base model; verify hidden-state shape."""
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Forward with cross-attention inputs; verify hidden-state shape."""
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Causal-LM forward with labels; verify logits shape."""
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1 )
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3 ) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the Llama models.

    NOTE(review): reconstructed — the obfuscated original inherited from an
    undefined name and bound every class attribute / method to one shared
    name, so later definitions shadowed earlier ones; the canonical
    transformers names (which the imported mixins look up) are restored.
    """

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # the config is the first element of prepare_config_and_inputs()
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    """Slow integration checks against released Llama-2 checkpoints.

    NOTE(review): reconstructed — all methods in the obfuscated original
    shared one name (shadowing each other) and referenced undefined
    identifiers; every expected tensor value is kept from the original.
    All tests are currently skipped upstream.
    """

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ), expected_mean, atol=1e-2, rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ), expected_mean, atol=1e-2, rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5 )

    @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ), expected_mean, atol=1e-2, rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        # Fix: the original compared the mean against the 30-element slice
        # (a shape mismatch); compare the slice as the sibling tests do.
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-2, rtol=1e-2 )

    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ), expected_mean, atol=1e-2, rtol=1e-2 )
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5 )

    @unittest.skip('Model is curently gated' )
    @slow
    def test_model_13b_greedy_generation(self):
        expected_text_completion = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt, return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True )
        self.assertEqual(expected_text_completion, text )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 316 |
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
    """Return the complementary DNA strand (A<->T, C<->G).

    Args:
        _lowerCamelCase: a DNA strand made up solely of the characters A, T, C, G.

    Returns:
        The complement strand as a string.

    Raises:
        ValueError: if the strand contains any character other than A, T, C, G.
    """
    # Every character must be a valid nucleotide; otherwise reject the strand.
    if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
        raise ValueError('Invalid Strand' )
    # Fix: the original body referenced an undefined name ``dna`` — the
    # parameter itself is the strand to complement.
    return _lowerCamelCase.translate(str.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 316 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 153 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky 2.2 img2img pipeline with tiny models.

    NOTE(review): reconstructed — the obfuscated original inherited from an
    undefined base, bound all class attributes / methods to one shared name
    (so later definitions shadowed earlier ones), and used the nonexistent
    ``np.uinta``; canonical diffusers names are restored.
    """

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet with fixed random weights."""
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """A tiny MoVQ VAE with fixed random weights."""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model

    def get_dummy_components(self):
        """Assemble the unet/scheduler/movq dict the pipeline constructor needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        """Deterministic random embeddings, init image, and call kwargs."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_kandinsky_img2img(self):
        """Run the tiny pipeline end to end and compare a pixel slice."""
        device = '''cpu'''

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    """End-to-end img2img run against the released Kandinsky 2.2 weights.

    NOTE(review): reconstructed — the obfuscated method names prevented
    unittest from ever invoking ``tearDown``/the test; the canonical names
    and the ``torch.float16`` dtype (obfuscated as ``torch.floataa``) are
    restored.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()

        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image , expected_image )
| 110 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    """Builds a tiny UMT5 config plus dummy inputs and runs shape, KV-cache and
    fp16 checks on behalf of the owning test case.

    NOTE(review): in the original every method was named ``lowercase`` (each
    definition shadowing the previous one) and every assignment target was
    ``_snake_case``; names are restored from the call sites visible in this
    file (``UMTaModelTester(self)``, ``prepare_config_and_inputs``,
    ``create_and_check_model_fpaa_forward``).
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None  # kept as in the original: always None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        """Config of a real checkpoint, for tests needing realistic sizes."""
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default masks for any that were not supplied."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        # presumably mirrors num_attention_heads onto the encoder field — TODO confirm
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        """Tiny config with a reduced vocab (166) for pipeline tests."""
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        """Tiny config sized by this tester's hyper-parameters."""
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        """Forward pass shape checks, with and without explicit masks."""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        """KV-cache consistency: output with past must match output without."""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fpaa_forward(self, config, input_dict):
        """Half-precision forward must not produce NaNs.

        Name kept exactly as the caller in the test class below spells it
        ("fpaa" is obfuscated "fp16")."""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API tests for the tiny UMT5 models.

    NOTE(review): in the original every class attribute was bound to the same
    name (``__a``, so only the last value survived) and every method was named
    ``lowercase``; attribute and method names are restored to the conventions
    the mixin base classes expect.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names are a best-effort mapping for the five booleans
    # (True, False, False, True, True) the original collapsed into ``__a`` —
    # TODO confirm against upstream.
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        """Zeroed head masks must zero the corresponding attention weights."""
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    """Golden-output integration test for google/umt5-small.

    NOTE(review): in the original every intermediate was bound to
    ``_snake_case`` and undefined ``_lowerCamelCase`` placeholders were passed
    to the assertions (comparing a value with itself); real names restored.
    The class is also renamed so it no longer shadows the classes above.
    """

    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        """Check tokenization against golden ids, then greedy generation."""
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        # use_fast/legacy assumed False per the slow-tokenizer intent of the
        # skip message above — TODO confirm.
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 355 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float:
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(__lowerCamelCase ) * abs(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 40 | 0 |
def SCREAMING_SNAKE_CASE_(__A: str) -> list:
    """Return every variant of ``__A`` in which exactly one alphabetic
    character has been upper-cased, ordered by that character's position.

    Non-alphabetic characters contribute no variant.
    """
    variants = []
    for pos, ch in enumerate(__A):
        if ch.isalpha():
            variants.append(__A[:pos] + ch.upper() + __A[pos + 1 :])
    return variants


if __name__ == "__main__":
    __import__('doctest').testmod()
| 32 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for TextToVideoSDPipeline built from tiny dummy modules.

    NOTE(review): in the original every method was named
    ``SCREAMING_SNAKE_CASE`` (each shadowing the last) and every local was
    ``a_``, so the component dicts referenced undefined names; restored so the
    mixin hooks (``get_dummy_components``/``get_dummy_inputs``/``test_*``)
    actually run.
    """

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ])

    def get_dummy_components(self):
        """Tiny UNet/scheduler/VAE/CLIP stack, all seeded for determinism."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        # clip_sample/set_alpha_to_one assumed False (original passed an
        # undefined placeholder) — TODO confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """GPU integration tests comparing generated videos to stored references.

    NOTE(review): locals were all collapsed to ``a_`` in the original (so
    ``pipe`` was read before ever being defined); names restored below.
    """

    def test_full_model(self):
        """25-step generation with the DPMSolver multistep scheduler."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        """2-step generation with the pipeline's default scheduler."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 32 | 1 |
"""simple docstring"""
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence: 2, 3, 7, 43, ...

    Defined by a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, computed here as
    (a(n-1) - 1) * a(n-1) + 1.

    Raises:
        ValueError: if ``number`` < 1.
        AssertionError: if ``number`` is not an int.
    """
    # NOTE(review): the original checked isinstance(x, x) — a TypeError — and
    # recursed via the name ``sylvester`` while being defined as
    # ``UpperCAmelCase__``; renamed to match its own recursion and the
    # __main__ guard below.
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 195 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class UpperCamelCase_(PretrainedConfig):
    """Configuration class for the Salesforce CTRL architecture.

    NOTE(review): the original declared every ``__init__`` parameter with the
    same name (a SyntaxError), bound every class attribute to ``snake_case__``
    (only the last survived), and inherited from the undefined name
    ``UpperCamelCase``; parameter/attribute names are restored from the value
    each body line reads, the base class from the file's import.
    """

    model_type = "ctrl"
    # presumably the standard keys_to_ignore_at_inference list — TODO confirm
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 195 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket')
@patch('builtins.open')
def _SCREAMING_SNAKE_CASE(file, sock):
    """send_file must open the file, accept one client, stream one chunk, and
    release every resource.

    ``file`` patches builtins.open (the innermost/bottom decorator becomes the
    first argument); ``sock`` patches socket.socket.

    NOTE(review): the original declared both parameters with the same name (a
    SyntaxError) and rebound one local three times, destroying the mock
    wiring; restored below.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    chunks = iter([1, None])  # one data chunk, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(chunks)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 204 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[str] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) pixel box to the 0-1000 coordinate
    grid LayoutLM-style models expect.

    Args:
        box: sequence of four pixel coordinates (left, top, right, bottom).
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        List of four ints on the 0-1000 scale.
    """
    # NOTE(review): originally defined with three parameters all named
    # ``lowercase`` (a SyntaxError) under the name ``_SCREAMING_SNAKE_CASE``
    # (which collided with the OCR helper below); renamed to
    # ``normalize_box``, the name its caller uses.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def _SCREAMING_SNAKE_CASE(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``
    where boxes are on the 0-1000 LayoutLM grid.

    NOTE(review): the original declared all three parameters with the same
    name (a SyntaxError) and collapsed every local to ``lowerCamelCase_``,
    breaking the data flow; names restored below.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__(
    self,
    do_resize: bool = True,
    size: Dict[str, int] = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    do_rescale: bool = True,
    rescale_value: float = 1 / 255,
    do_normalize: bool = True,
    image_mean: Union[float, Iterable[float]] = None,
    image_std: Union[float, Iterable[float]] = None,
    apply_ocr: bool = True,
    ocr_lang: Optional[str] = None,
    tesseract_config: Optional[str] = "",
    **kwargs,
) -> None:
    """Image-processor constructor: stores resize/rescale/normalize settings
    and the OCR options, defaulting the mean/std to the ImageNet constants.

    NOTE(review): the original declared every parameter with the same name
    (``A_``, a SyntaxError); names restored from the value each assignment
    reads.
    """
    super().__init__(**kwargs)
    size = size if size is not None else {'height': 224, 'width': 224}
    size = get_size_dict(size)
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_value
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    self.apply_ocr = apply_ocr
    self.ocr_lang = ocr_lang
    self.tesseract_config = tesseract_config
def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Resize ``image`` to ``size`` ({"height": H, "width": W}).

    Raises ValueError when the size dict is missing either key. Delegates to
    the module-level ``resize`` from image_transforms.

    NOTE(review): the original named every parameter ``A_`` (SyntaxError) and
    the method itself ``a__`` (colliding with the siblings); restored.
    """
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
    output_size = (size['height'], size['width'])
    return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
def rescale(
    self,
    image: np.ndarray,
    scale: Union[int, float],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Multiply pixel values by ``scale`` (delegates to the module-level
    ``rescale`` from image_transforms).

    NOTE(review): parameters were all named ``A_`` in the original (a
    SyntaxError) and the method was named ``a__``; restored.
    """
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, Iterable[float]],
    std: Union[float, Iterable[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Normalize ``image`` with per-channel ``mean``/``std`` (delegates to the
    module-level ``normalize`` from image_transforms).

    NOTE(review): parameters were all named ``A_`` in the original (a
    SyntaxError) and the method was named ``a__``; restored.
    """
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def preprocess(
    self,
    images: "ImageInput",
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample=None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Union[float, Iterable[float]] = None,
    image_std: Union[float, Iterable[float]] = None,
    apply_ocr: bool = None,
    ocr_lang: Optional[str] = None,
    tesseract_config: Optional[str] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
):
    """Preprocess one or more images and (optionally) run Tesseract OCR on them.

    Every `None` argument falls back to the value configured in `__init__`.
    Returns a BatchFeature with 'pixel_values' plus 'words'/'boxes' when OCR ran.

    NOTE(review): the original signature repeated the placeholder `A_` for all
    parameters; names are reconstructed from the self.* fallbacks in the body.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size)
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
    ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
    tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
            'torch.Tensor, tf.Tensor or jax.ndarray.')
    if do_resize and size is None:
        raise ValueError('Size must be specified if do_resize is True.')
    if do_rescale and rescale_factor is None:
        raise ValueError('Rescale factor must be specified if do_rescale is True.')
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    # Tesseract OCR to get words + normalized bounding boxes.
    if apply_ocr:
        requires_backends(self, 'pytesseract')
        words_batch = []
        boxes_batch = []
        for image in images:
            words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
            words_batch.append(words)
            boxes_batch.append(boxes)
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
    if apply_ocr:
        data['words'] = words_batch
        data['boxes'] = boxes_batch
    return data
| 204 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Map of pretrained checkpoint name -> config URL.
_snake_case : Dict = {
    'facebook/mask2former-swin-small-coco-instance': (
        'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# The config class below calls `logger.info` / `logger.warning_once`; bind the
# module logger under that name instead of clobbering the archive map above
# (the original assigned both values to the same `_snake_case` name).
logger = logging.get_logger(__name__)
class A(_a):
    """Configuration for a Mask2Former model (Swin backbone + transformer decoder).

    NOTE(review): the original `__init__` repeated the placeholder `lowerCAmelCase_`
    for every parameter (a SyntaxError); parameter and attribute names are
    reconstructed from the body. `model_type` is grounded by `to_dict`'s use of
    `self.__class__.model_type`; `backbones_supported` by the check below.
    """

    model_type = 'mask2former'
    backbones_supported = ['swin']
    # Maps the generic config attribute to this model's field name — TODO confirm
    # the attribute name `attribute_map` against the PretrainedConfig base class.
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            # use_absolute_embeddings=False — reconstructed default; the obfuscated
            # original passed an undefined placeholder here. TODO confirm.
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3,
                use_absolute_embeddings=False, out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # Verify that the backbone is supported.
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror decoder_layers under the generic name (the original assigned the
        # same value twice to distinct attributes).
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: "PretrainedConfig", **kwargs):
        """Build a config from an already-instantiated backbone config."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 366 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (UpperCamelCase : list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums` (house-robber DP).

    An empty list yields 0. The original body referenced `max_excluding` without
    ever defining it (all locals had been collapsed to `_a`); the two DP
    accumulators are restored here.
    """
    if not UpperCamelCase:
        return 0
    # max_including: best sum that includes the current element;
    # max_excluding: best sum that skips it.
    max_including = UpperCamelCase[0]
    max_excluding = 0
    for num in UpperCamelCase[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 179 | 0 |
from scipy.stats import spearmanr
import datasets
__lowerCAmelCase : Any = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__lowerCAmelCase : Any = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Spearman rank-order correlation metric backed by `scipy.stats.spearmanr`.

    NOTE(review): the original defined both methods as `_lowercase` with
    triplicated parameter names (a SyntaxError) and called `spearmanr` on an
    undefined placeholder. Method names `_info`/`_compute` follow the
    `datasets.Metric` contract — confirm against the installed datasets version.
    """

    def _info(self):
        """Describe the metric's inputs and reference documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float"""),
                    """references""": datasets.Value("""float"""),
                }),
            reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {'spearmanr': rho} (plus 'spearmanr_pvalue' when requested)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 88 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow MobileBERT checkpoint to a PyTorch state dict on disk.

    Renamed from the obfuscated `UpperCamelCase`: the `__main__` block below calls
    `convert_tf_checkpoint_to_pytorch`. The original triplicated `snake_case__`
    for all three parameters (a SyntaxError).
    """
    # Initialise PyTorch model from the JSON config.
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from the TF checkpoint into the PyTorch model.
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model.
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # The original bound the parser and parsed args to `__UpperCAmelCase` while the
    # code below read `parser` / `args`; the names are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--mobilebert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained MobileBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 119 | 0 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes <= num via the Sieve of Eratosthenes.

    Raises ValueError for non-positive input. Renamed from `a__`: the
    `__main__` block calls `prime_sieve_eratosthenes`. Two bugs fixed from the
    obfuscated original: the inner range stepped by the input instead of the
    prime `p`, and the composite-marking assignment had lost its target.
    """
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original assigned the input to `_A` but printed `user_num`; bind the
    # name the print statement actually reads.
    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 364 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase(SeqaSeqTrainer):
    """Seq2seq trainer with question-answering style post-processing.

    NOTE(review): the original base class `lowerCAmelCase__` was undefined; the
    imports show `SeqaSeqTrainer`, which is restored here. Both public methods
    were named `_a` and every parameter collapsed to `_lowerCamelCase`
    (SyntaxErrors); names are reconstructed from the bodies. Method names
    `evaluate`/`predict` follow the Trainer API — confirm against callers.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run generation-based evaluation and attach post-processed metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["""max_length"""] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""") is not None else self.args.generation_max_length
        )
        gen_kwargs["""num_beams"""] = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="""Evaluation""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the callback, even if the eval loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'.
            for key in list(metrics.keys()):
                if not key.startswith(F"""{metric_key_prefix}_"""):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default.
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run prediction and wrap the post-processed output in a PredictionOutput."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="""Prediction""",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, """predict""")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'.
        for key in list(metrics.keys()):
            if not key.startswith(F"""{metric_key_prefix}_"""):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 166 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# The DETR config class below calls `logger.info` / `logger.warning...`; the
# original bound the logger and the archive map to the same obfuscated name,
# so the second assignment clobbered the first.
logger = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase_(_A):
    """Configuration for a DETR object-detection model.

    NOTE(review): the original `__init__` repeated `UpperCamelCase__` for every
    parameter (a SyntaxError) and all four methods were named `_lowercase`;
    names are reconstructed from the body. `model_type` is grounded by
    `to_dict`'s use of `self.__class__.model_type`.
    """

    model_type = """detr"""
    # Cache keys skipped at inference — attribute name per PretrainedConfig
    # convention; TODO confirm.
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Mirror encoder_layers under the generic name (assigned twice in the
        # original body).
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Build a config from an already-instantiated backbone config."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
class UpperCAmelCase_(_A):
    """ONNX export configuration for DETR.

    NOTE(review): the original dump reuses the class name of the config above
    (the second definition shadows the first) — the two classes likely had
    distinct names before obfuscation. Property names follow the OnnxConfig
    API; the original named all three `_lowercase`.
    """

    # Minimum torch version required for the ONNX export — attribute name per
    # OnnxConfig convention; TODO confirm.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the model inputs and their dynamic axes."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 88 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
snake_case_ = logging.getLogger(__name__)
def git_log(folder_path):
    """Dump the current git repo's id, commit sha and branch to <folder_path>/git_log.json.

    NOTE(review): renamed from the obfuscated `_lowerCAmelCase` (three functions
    shared that name, shadowing each other). The original passed the folder path
    as the `search_parent_directories` flag and json-dumped the path into
    itself; both are repaired here.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Populate distributed-training fields on `params` (rank, world size, master flag)
    and initialize CUDA / torch.distributed when multiple GPUs are requested.

    NOTE(review): renamed from the obfuscated `_lowerCAmelCase`; the original
    assigned every `params.<field>` to a throwaway local. Field names are
    reconstructed from the reads later in this same function.
    """
    if params.n_gpu <= 0:
        # CPU-only run: single master process, no distributed setup.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank : %i' % params.local_rank)
    logger.info(PREFIX + 'World size : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname : %s' % socket.gethostname())
    # set GPU device
    torch.cuda.set_device(params.local_rank)
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl',
        )
def set_seed(args):
    """Seed numpy, torch and (when GPUs are used) all CUDA devices from `args.seed`.

    Renamed from the obfuscated `_lowerCAmelCase`; the original's parameter was
    named `lowercase_` while the body read the undefined name `args`.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 78 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__(metaclass=DummyObject):
    """Placeholder raising an informative error when the `speech` backend is missing.

    NOTE(review): the original used the undefined `A_` as metaclass (the import
    above brings in `DummyObject`) and repeated `A_` for the init parameters.
    The backend list attribute is named `_backends` per the DummyObject
    convention — TODO confirm.
    """

    _backends = ['speech']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])


# NOTE(review): the obfuscated dump gives both dummy classes the same name, so
# this second definition shadows the first; they presumably had distinct names
# (one per dummy speech object) before mangling.
class UpperCAmelCase__(metaclass=DummyObject):
    """Placeholder raising an informative error when the `speech` backend is missing."""

    _backends = ['speech']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
| 369 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: "Features"):
    """Return the parquet row-group batch size implied by `features`, or None.

    Renamed from the obfuscated `_UpperCAmelCase`: the writer class below calls
    `get_writer_batch_size(dataset.features)`. The original's isinstance checks
    compared each feature against itself; they are restored to the Image /
    Audio / binary-Value checks the three config constants correspond to.
    """
    batch_size = np.inf

    def set_batch_size(feature: "FeatureType") -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class UpperCAmelCase__(AbstractDatasetReader):
    """Reads parquet files into a (streaming or map-style) Dataset.

    NOTE(review): the original base `A_` was undefined; the imports bring in
    `AbstractDatasetReader`, restored here. `__init__` parameters were all
    named `A_` (SyntaxError); names are reconstructed from the super() call.
    """

    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None,
                 keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Normalize to {split: paths}.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        """Materialize the dataset (method name per the reader API — TODO confirm)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class UpperCAmelCase__:
    """Writes a Dataset to a parquet file or binary file object.

    NOTE(review): both methods in the original were named `_a`, yet `write`
    calls `self._write` — the two names are restored from that internal call.
    `__init__` parameter names are reconstructed from the attribute reads.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Fall back to a batch size derived from the features (image/audio/binary).
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset; returns the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        """Stream the underlying Arrow table to `file_obj` in `batch_size` slices."""
        written = 0
        # A stray 'path_or_buf' kwarg would conflict with file_obj; drop it.
        parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba',
            disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format',
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 117 | 0 |
import string
def atbash_slow(sequence):
    """Encrypt `sequence` with the Atbash cipher, character by character.

    Renamed from `UpperCamelCase`: `benchmark` and its timeit setup reference
    `atbash_slow`. The original iterated an undefined `sequence` and called
    `ord` on an undefined name; both restored.
    """
    output = ''
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # Uppercase: A (65) <-> Z (90), mirrored around 155/2.
            output += chr(1_55 - extract)
        elif 97 <= extract <= 1_22:
            # Lowercase: a (97) <-> z (122), mirrored around 219/2.
            output += chr(2_19 - extract)
        else:
            # Non-letters pass through unchanged.
            output += i
    return output
def atbash(sequence):
    """Encrypt `sequence` with the Atbash cipher via a reversed-alphabet lookup.

    Renamed from `UpperCamelCase`: the `__main__` block and `benchmark` call
    `atbash`. The original indexed `letters` with an undefined name instead of
    the loop character.
    """
    letters = string.ascii_letters
    # Reversed alphabets, lowercase first to mirror ascii_letters' layout.
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
def benchmark():
    """Time `atbash_slow` vs `atbash` over `string.printable` and print the results.

    Renamed from `UpperCamelCase`: the `__main__` block calls `benchmark()`.
    The original passed an undefined name as the timeit `setup`; the setup
    string it builds is now actually used.
    """
    from timeit import timeit

    print('Running performance benchmarks...')
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(f'''> atbash_slow(): {timeit('atbash_slow(printable)' ,setup=setup )} seconds''')
    print(f'''> atbash(): {timeit('atbash(printable)' ,setup=setup )} seconds''')
# Demo: print the Atbash encryption of a few sample strings, then benchmark.
if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F"""{example} encrypted in atbash: {atbash(example)}""")
    benchmark()
| 140 |
def _lowerCamelCase( lowercase__ ) -> list:
    """Sort a list of integers with counting sort and return a new sorted list.

    Works for negative values (indices are offset by the minimum).  Stable:
    equal elements keep their original relative order.  Runs in
    O(n + k) where k is the value range.

    Args:
        lowercase__: list of ints to sort (not modified).

    Fixes over the previous revision: every local was clobbered into one
    obfuscated name while the reads used the original names (``collection``,
    ``coll_max``, ``counting_arr``, ``ordered`` ... all NameErrors), and the
    return annotation said ``int`` although a list is returned.
    """
    if not lowercase__:
        return []
    # Range information for sizing the counting array.
    coll_len = len(lowercase__)
    coll_max = max(lowercase__)
    coll_min = min(lowercase__)
    # One counter slot per distinct possible value.
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # Count occurrences of each value.
    for number in lowercase__:
        counting_arr[number - coll_min] += 1
    # Prefix sums: counting_arr[i] = number of elements <= (i + coll_min).
    for i in range(1, counting_arr_length):
        counting_arr[i] += counting_arr[i - 1]
    # Place elements from the end to keep the sort stable.
    ordered = [0] * coll_len
    for i in reversed(range(coll_len)):
        value = lowercase__[i]
        ordered[counting_arr[value - coll_min] - 1] = value
        counting_arr[value - coll_min] -= 1
    return ordered
def _lowerCamelCase( lowercase__ ) -> str:
    """Sort the characters of *lowercase__* by code point and return the new string.

    Delegates to a module-level ``counting_sort`` over the characters' ordinals.

    NOTE(review): ``counting_sort`` is not defined under that name in this
    module (the integer sort above is also named ``_lowerCamelCase``); confirm
    the intended function name.

    Fixes over the previous revision: the comprehension iterated an undefined
    ``string`` and converted ``ord(lowercase__)``/``chr(lowercase__)`` instead
    of the per-character loop variables; the ``List[str]`` annotation also
    referenced an unimported name.
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in lowercase__])])
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): `counting_sort_string` / `counting_sort` are not defined
    # under these names above (both functions are named `_lowerCamelCase`, the
    # second shadowing the first), and `user_input` / `unsorted` are never bound
    # (both assignments below target the same obfuscated name `lowerCAmelCase`).
    # This guard raises NameError as written; confirm the intended names.
    assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"

    lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
| 295 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_snake_case = logging.get_logger(__name__)
# Pretrained-config archive map (empty here).
# NOTE(review): this rebinds the same obfuscated name `_snake_case` as the
# logger above, clobbering it — the two were presumably distinct names before
# renaming (e.g. `logger` and a `*_PRETRAINED_CONFIG_ARCHIVE_MAP`).
_snake_case = {}
class a__ ( PretrainedConfig ):
    """Configuration for a LLaMA-style decoder-only transformer.

    Stores the hyper-parameters needed to build the model (vocabulary size,
    hidden/intermediate sizes, layer and head counts, RoPE scaling, special
    token ids) and validates the optional ``rope_scaling`` dict.

    Fixes over the previous revision: the base class was an undefined name
    (now the imported ``PretrainedConfig``); ``__init__`` declared every
    parameter with the same name (a SyntaxError); values were bound to a
    throwaway local instead of ``self``; the validation method's name did not
    match the ``self._rope_scaling_validation()`` call; and the validation
    body referenced undefined names where ``dict`` / ``None`` / ``float``
    belong. The error message now names the ``type`` field that is actually
    read.
    """

    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.0_2,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: plain multi-head attention when no
        # separate key/value head count is given
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``: None, or ``{"type": <linear|dynamic>, "factor": float > 1}``.

        Raises:
            ValueError: if the dict shape, type name, or factor is invalid.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 352 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
# Module-level logger (stdlib logging here, unlike the transformers helper).
_snake_case = logging.getLogger(__name__)
# Map from model shorthand to its hosted config URL.
# NOTE(review): this rebinds the same obfuscated name `_snake_case` as the
# logger above, clobbering it — they were presumably distinct names originally.
_snake_case = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class a__ ( PretrainedConfig ):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    Holds the encoder (``enc_*``) and decoder (``dec_*``) layer counts, hidden
    sizes, head counts, feed-forward sizes and dropout rates, plus the shared
    vocabulary size and maximum position.

    Fixes over the previous revision: the base class was an undefined name
    (now the imported ``PretrainedConfig``); ``__init__`` declared every
    parameter with the same name (a SyntaxError); and values were bound to a
    throwaway local instead of being stored on ``self``.
    """

    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 199 | 0 |
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_UpperCamelCase : Any = [{"type": "code", "content": INSTALL_CONTENT}]
_UpperCamelCase : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 77 |
import os
from distutils.util import strtobool
def __lowercase ( env_keys , default ) -> int:
    """Return the first non-negative integer found among the given environment
    variables, or *default* if none of them holds one.

    Args:
        env_keys: iterable of environment-variable names, probed in order.
        default: value returned when no listed variable is set to an int >= 0.

    Fix over the previous revision: both parameters were declared with the
    same name (a SyntaxError) while the body read the original names.
    """
    for e in env_keys:
        # Missing variables fall back to -1 and are skipped by the check below.
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def __lowercase ( key , default=False ) -> bool:
    """Interpret the environment variable *key* as a boolean flag.

    Accepts the same spellings as ``distutils.util.strtobool``; *default* is
    stringified and parsed the same way when the variable is unset.

    Raises:
        ValueError: if the value is not a recognized truth/false spelling.

    Fixes over the previous revision: both parameters shared one name (a
    SyntaxError); the ``distutils`` dependency (removed in Python 3.12) is
    inlined with identical semantics.
    """
    value = os.environ.get(key, str(default))
    # Inline equivalent of distutils.util.strtobool(...) == 1.
    lowered = value.lower()
    if lowered in ("y", "yes", "t", "true", "on", "1"):
        return True
    if lowered in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="no" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
return value
| 296 | 0 |
def __lowerCamelCase ( string_a , string_b ) -> int:
    """Return the Hamming distance between two equal-length strings
    (the number of positions at which they differ).

    Raises:
        ValueError: if the strings have different lengths.

    Fixes over the previous revision: both parameters were declared with the
    same name (a SyntaxError), the loop unpacked ``for chara, chara in ...`` so
    the comparison ``chara != chara`` was always False (distance always 0),
    and the length check referenced undefined names.
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    # Count the positions where the two strings disagree.
    return sum(ca != cb for ca, cb in zip(string_a, string_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 | import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger for this feature extractor.
lowerCAmelCase__ = logging.get_logger(__name__)
class a_ ( SCREAMING_SNAKE_CASE ):
    """SpeechT5-style feature extractor: raw-waveform inputs and log-mel targets.

    NOTE(review): every method signature below declares all of its parameters
    with the same obfuscated name ``lowercase__`` (a SyntaxError as written),
    while the bodies read the original parameter names (``speech``,
    ``attention_mask``, ``padding`` ...), and locals are all clobbered into
    ``lowerCAmelCase__`` while later reads use the original local names.
    The original parameter/local names must be restored before this class can
    run; the comments below describe the intent visible in the code.
    """

    # Names produced by this extractor's output dict.
    UpperCAmelCase_ = ['input_values', 'attention_mask']

    def __init__( self : Union[str, Any] , lowercase__ : int = 1 , lowercase__ : int = 16_000 , lowercase__ : float = 0.0 , lowercase__ : bool = False , lowercase__ : int = 80 , lowercase__ : int = 16 , lowercase__ : int = 64 , lowercase__ : str = "hann_window" , lowercase__ : float = 1.0 , lowercase__ : float = 80 , lowercase__ : float = 7_600 , lowercase__ : float = 1e-10 , lowercase__ : int = 2 , lowercase__ : bool = True , **lowercase__ : Optional[int] , ):
        """Store STFT/mel parameters, precompute the analysis window and the
        mel filter bank, and warn about the deprecated scale/reduction args."""
        super().__init__(feature_size=lowercase__ , sampling_rate=lowercase__ , padding_value=lowercase__ , **lowercase__)
        lowerCAmelCase__ = do_normalize
        lowerCAmelCase__ = return_attention_mask
        lowerCAmelCase__ = num_mel_bins
        lowerCAmelCase__ = hop_length
        lowerCAmelCase__ = win_length
        lowerCAmelCase__ = win_function
        lowerCAmelCase__ = frame_signal_scale
        lowerCAmelCase__ = fmin
        lowerCAmelCase__ = fmax
        lowerCAmelCase__ = mel_floor
        lowerCAmelCase__ = reduction_factor
        # Window/hop sizes converted from milliseconds to samples.
        lowerCAmelCase__ = win_length * sampling_rate // 1_000
        lowerCAmelCase__ = hop_length * sampling_rate // 1_000
        lowerCAmelCase__ = optimal_fft_length(self.sample_size)
        lowerCAmelCase__ = (self.n_fft // 2) + 1
        # Analysis window and slaney-scaled mel filter bank, computed once.
        lowerCAmelCase__ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase__)
        lowerCAmelCase__ = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase__ , )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase__ , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def __snake_case ( lowercase__ : List[np.ndarray] , lowercase__ : List[np.ndarray] , lowercase__ : float = 0.0):
        """Normalize each vector to zero mean / unit variance; when an attention
        mask is given, statistics use only the unpadded prefix and padded tail
        positions are reset to the padding value."""
        if attention_mask is not None:
            lowerCAmelCase__ = np.array(lowercase__ , np.intaa)
            lowerCAmelCase__ = []

            for vector, length in zip(lowercase__ , attention_mask.sum(-1)):
                # 1e-7 guards against division by zero on constant slices.
                lowerCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    lowerCAmelCase__ = padding_value

                normed_input_values.append(lowercase__)
        else:
            lowerCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __snake_case ( self : List[Any] , lowercase__ : np.ndarray , ):
        """Compute the log10-mel spectrogram of one waveform; returned as (frames, mel_bins)."""
        lowerCAmelCase__ = spectrogram(
            lowercase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
        return log_mel_spec.T

    def __call__( self : str , lowercase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowercase__ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , lowercase__ : Union[bool, str, PaddingStrategy] = False , lowercase__ : Optional[int] = None , lowercase__ : bool = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[int] = None , **lowercase__ : Union[str, Any] , ):
        """Featurize model inputs (`audio`) and/or decoder targets (`audio_target`).

        When both are given, the target features are attached to the input
        batch as `labels` / `decoder_attention_mask`; when only targets are
        given, the target batch is returned directly."""
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        if audio is not None:
            lowerCAmelCase__ = self._process_audio(
                lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , )
        else:
            lowerCAmelCase__ = None

        if audio_target is not None:
            lowerCAmelCase__ = self._process_audio(
                lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , )

            if inputs is None:
                return inputs_target
            else:
                lowerCAmelCase__ = inputs_target['input_values']
                lowerCAmelCase__ = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    lowerCAmelCase__ = decoder_attention_mask

        return inputs

    def __snake_case ( self : Tuple , lowercase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase__ : bool = False , lowercase__ : Union[bool, str, PaddingStrategy] = False , lowercase__ : Optional[int] = None , lowercase__ : bool = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[bool] = None , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Optional[Any] , ):
        """Shared featurization path: batch/convert the raw speech, extract mel
        targets when `is_target`, pad, dtype-normalize, and (for inputs)
        optionally apply zero-mean/unit-variance normalization."""
        lowerCAmelCase__ = isinstance(lowercase__ , np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
        lowerCAmelCase__ = is_batched_numpy or (
            isinstance(lowercase__ , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list)))
        )

        if is_batched:
            lowerCAmelCase__ = [np.asarray(lowercase__ , dtype=np.floataa) for speech in speech]
        elif not is_batched and not isinstance(lowercase__ , np.ndarray):
            lowerCAmelCase__ = np.asarray(lowercase__ , dtype=np.floataa)
        elif isinstance(lowercase__ , np.ndarray) and speech.dtype is np.dtype(np.floataa):
            lowerCAmelCase__ = speech.astype(np.floataa)

        # always return batch
        if not is_batched:
            lowerCAmelCase__ = [speech]

        # needed to make pad() work on spectrogram inputs
        lowerCAmelCase__ = self.feature_size

        # convert into correct format for padding
        if is_target:
            lowerCAmelCase__ = [self._extract_mel_features(lowercase__) for waveform in speech]
            lowerCAmelCase__ = BatchFeature({'input_values': features})
            lowerCAmelCase__ = self.num_mel_bins
        else:
            lowerCAmelCase__ = BatchFeature({'input_values': speech})

        lowerCAmelCase__ = self.pad(
            lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
        # Restore the true feature size after the pad() hack above.
        lowerCAmelCase__ = feature_size_hack

        # convert input values to correct format
        lowerCAmelCase__ = padded_inputs['input_values']
        if not isinstance(input_values[0] , np.ndarray):
            lowerCAmelCase__ = [np.asarray(lowercase__ , dtype=np.floataa) for array in input_values]
        elif (
            not isinstance(lowercase__ , np.ndarray)
            and isinstance(input_values[0] , np.ndarray)
            and input_values[0].dtype is np.dtype(np.floataa)
        ):
            lowerCAmelCase__ = [array.astype(np.floataa) for array in input_values]
        elif isinstance(lowercase__ , np.ndarray) and input_values.dtype is np.dtype(np.floataa):
            lowerCAmelCase__ = input_values.astype(np.floataa)

        # convert attention_mask to correct format
        lowerCAmelCase__ = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            lowerCAmelCase__ = [np.asarray(lowercase__ , dtype=np.intaa) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            lowerCAmelCase__ = (
                attention_mask
                if self._get_padding_strategies(lowercase__ , max_length=lowercase__) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            lowerCAmelCase__ = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'] , attention_mask=lowercase__ , padding_value=self.padding_value)

        if return_tensors is not None:
            lowerCAmelCase__ = padded_inputs.convert_to_tensors(lowercase__)

        return padded_inputs

    def __snake_case ( self : Any):
        """Serialize the config, dropping attributes derived from the others."""
        lowerCAmelCase__ = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        lowerCAmelCase__ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]

        return output
| 119 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; seed fixed so old/new model outputs are comparable.
# NOTE(review): every constant below is bound to the same obfuscated name
# `__lowercase`, each assignment clobbering the previous one, while the
# functions further down read the original names (new_layer_name_dict,
# REMOTE_MODEL_PATHS, CUR_PATH, default_cache_dir, CACHE_DIR). Restore the
# original constant names before running this script.
__lowercase = logging.get_logger(__name__)

set_seed(770)


# Mapping from suno/bark checkpoint key fragments to HF Bark layer names.
__lowercase = {
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}

# Hub locations for the small/large text, coarse and fine checkpoints.
__lowercase = {
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}

# Local cache layout: <XDG_CACHE_HOME or ~/.cache>/suno/bark_v0
__lowercase = os.path.dirname(os.path.abspath(__file__))
__lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def lowerCamelCase ( model_type , use_small=False ):
    """Return the expected local cache path of a Bark checkpoint.

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: select the "<type>_small" checkpoint variant.

    NOTE(review): ``CACHE_DIR`` / ``REMOTE_MODEL_PATHS`` are bound to the
    obfuscated module name ``__lowercase`` above; confirm the intended
    constant names before running.

    Fix over the previous revision: both parameters were declared with the
    same name (a SyntaxError) and the joined base directory was ambiguous —
    it is the module cache directory, not the model type.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['''file_name'''])
def lowerCamelCase ( from_hf_path , file_name ):
    """Download *file_name* from the Hub repo *from_hf_path* into the local cache.

    Creates the cache directory if needed; the download itself is delegated to
    ``hf_hub_download``.

    NOTE(review): ``CACHE_DIR`` is bound to the obfuscated module name
    ``__lowercase`` above; confirm the intended constant name.

    Fix over the previous revision: both parameters were declared with the
    same name (a SyntaxError) and the ``makedirs`` arguments were ambiguous.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
    """Load a suno/bark checkpoint and convert it into the matching HF Bark sub-model.

    NOTE(review): the signature declares several parameters with the same name
    (a SyntaxError as written), and the body reads many names that are never
    bound here because every assignment targets the obfuscated local
    `__UpperCamelCase` (`ModelClass`, `ConfigClass`, `GenerationConfigClass`,
    `model_key`, `model_info`, `checkpoint`, `model_args`, `model`,
    `state_dict`, `extra_keys`, `missing_keys`, `n_params`, `val_loss`) —
    plus `_download`, which is not defined under that name in this module.
    Restore the original names before relying on this function.
    """
    # Pick the HF model/config/generation-config classes for the sub-model type.
    if model_type == "text":
        __UpperCamelCase :Tuple = BarkSemanticModel
        __UpperCamelCase :List[str] = BarkSemanticConfig
        __UpperCamelCase :Any = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        __UpperCamelCase :int = BarkCoarseModel
        __UpperCamelCase :Optional[Any] = BarkCoarseConfig
        __UpperCamelCase :Tuple = BarkCoarseGenerationConfig
    elif model_type == "fine":
        __UpperCamelCase :Any = BarkFineModel
        __UpperCamelCase :Union[str, Any] = BarkFineConfig
        __UpperCamelCase :Any = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    # Download the checkpoint into the cache if it is not present yet.
    __UpperCamelCase :Union[str, Any] = f"""{model_type}_small""" if use_small else model_type
    __UpperCamelCase :Tuple = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(SCREAMING_SNAKE_CASE ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['''repo_id'''] , model_info['''file_name'''] )
    __UpperCamelCase :List[str] = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
    # this is a hack
    __UpperCamelCase :str = checkpoint['''model_args''']
    if "input_vocab_size" not in model_args:
        __UpperCamelCase :Dict = model_args['''vocab_size''']
        __UpperCamelCase :Dict = model_args['''vocab_size''']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    __UpperCamelCase :Tuple = model_args.pop('''n_head''' )
    __UpperCamelCase :Optional[Any] = model_args.pop('''n_embd''' )
    __UpperCamelCase :List[Any] = model_args.pop('''n_layer''' )
    __UpperCamelCase :Union[str, Any] = ConfigClass(**checkpoint['''model_args'''] )
    __UpperCamelCase :str = ModelClass(config=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = GenerationConfigClass()
    __UpperCamelCase :Tuple = model_generation_config
    __UpperCamelCase :str = checkpoint['''model''']
    # fixup checkpoint
    __UpperCamelCase :List[Any] = '''_orig_mod.'''
    for k, v in list(state_dict.items() ):
        if k.startswith(SCREAMING_SNAKE_CASE ):
            # replace part of the key with corresponding layer name in HF implementation
            __UpperCamelCase :Optional[Any] = k[len(SCREAMING_SNAKE_CASE ) :]
            for old_layer_name in new_layer_name_dict:
                __UpperCamelCase :Union[str, Any] = new_k.replace(SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
            __UpperCamelCase :List[str] = state_dict.pop(SCREAMING_SNAKE_CASE )
    # Verify the converted state dict lines up with the HF model (ignoring
    # the non-persistent attention-bias buffers).
    __UpperCamelCase :Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
    __UpperCamelCase :int = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
    __UpperCamelCase :int = set(model.state_dict().keys() ) - set(state_dict.keys() )
    __UpperCamelCase :int = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
    if len(SCREAMING_SNAKE_CASE ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(SCREAMING_SNAKE_CASE ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Tuple = checkpoint['''best_val_loss'''].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE , 3 )} loss""" )
    model.eval()
    model.to(SCREAMING_SNAKE_CASE )
    del checkpoint, state_dict
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ):
    """Convert one Bark sub-model, compare its output with the original suno
    implementation on a random batch, and save the HF checkpoint.

    NOTE(review): several parameters share one name (a SyntaxError as
    written), and the body reads names never bound here because assignments
    target the obfuscated `__UpperCamelCase` (`device`, `ckpt_path`, `model`,
    `bark_model`, `batch_size`, `sequence_length`, `vec`,
    `output_old_model`, `output_new_model_total`, `output_new_model`,
    `n_codes_total`, `prediction_codeboook_channel`) — plus
    `_get_ckpt_path` / `_load_model`, which are not defined under those
    names here. Restore the original names before running.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    # Conversion and the numerical comparison both run on CPU.
    __UpperCamelCase :List[Any] = '''cpu'''  # do conversion on cpu
    __UpperCamelCase :List[Any] = _get_ckpt_path(SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    __UpperCamelCase :int = _load_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    # load bark initial model
    __UpperCamelCase :Optional[Any] = _bark_load_model(SCREAMING_SNAKE_CASE , '''cpu''' , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    if model_type == "text":
        __UpperCamelCase :Dict = bark_model['''model''']
    if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
        raise ValueError('''initial and new models don\'t have the same number of parameters''' )
    # check if same output as the bark model
    __UpperCamelCase :List[str] = 5
    __UpperCamelCase :List[str] = 10
    if model_type in ["text", "coarse"]:
        # Token-sequence models: compare last-step logits.
        __UpperCamelCase :Dict = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        __UpperCamelCase :str = bark_model(SCREAMING_SNAKE_CASE )[0]
        __UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE )
        # take last logits
        __UpperCamelCase :str = output_new_model_total.logits[:, [-1], :]
    else:
        # Fine model: inputs carry an extra codebook dimension.
        __UpperCamelCase :Any = 3
        __UpperCamelCase :List[Any] = 8
        __UpperCamelCase :Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        __UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCamelCase :str = bark_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __UpperCamelCase :str = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('''initial and new outputs don\'t have the same shape''' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('''initial and new outputs are not equal''' )

    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    model.save_pretrained(SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
    """Assemble the full Bark model from the three converted sub-models plus the
    Encodec codec, then save (and optionally push) the combined checkpoint.

    NOTE(review): all six parameters share one name (a SyntaxError as
    written), and the body reads names never bound here because assignments
    target the obfuscated `__UpperCamelCase` (`semanticConfig`,
    `coarseAcousticConfig`, `fineAcousticConfig`, `semantic`,
    `coarseAcoustic`, `fineAcoustic`, `codec`, `bark_generation_config`,
    `bark`). Restore the original names before running.
    """
    # Load the per-sub-model configs saved by the conversion step above.
    __UpperCamelCase :List[str] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :Tuple = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
    __UpperCamelCase :List[Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )

    # Load the converted sub-models and the pretrained codec.
    __UpperCamelCase :Union[str, Any] = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Dict = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :List[Any] = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE )
    __UpperCamelCase :str = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )

    # Merge configs and generation configs into the composite Bark config.
    __UpperCamelCase :Tuple = BarkConfig.from_sub_model_configs(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __UpperCamelCase :Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )

    __UpperCamelCase :int = BarkModel(SCREAMING_SNAKE_CASE )

    __UpperCamelCase :List[str] = semantic
    __UpperCamelCase :Any = coarseAcoustic
    __UpperCamelCase :Tuple = fineAcoustic
    __UpperCamelCase :List[Any] = codec
    __UpperCamelCase :int = bark_generation_config

    Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
    bark.save_pretrained(SCREAMING_SNAKE_CASE , repo_id=SCREAMING_SNAKE_CASE , push_to_hub=SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
__lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 43 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
# Module-level logger for the visual-question-answering pipeline.
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline ):
    """Visual Question Answering pipeline: answer a free-text question about an image.

    Accepts a (image, question) pair — or a dict / list of dicts with "image"
    and "question" keys — and returns the top-k answers as
    ``[{"score": float, "answer": str}, ...]``.

    Fixes over the previous revision: the decorator/base referenced an
    undefined name (now the imported ``PIPELINE_INIT_ARGS`` / ``Pipeline``);
    four distinct pipeline hooks all shared one method name, each shadowing
    the previous (restored to the ``Pipeline`` contract names
    ``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``); an annotated tuple-target assignment was a SyntaxError;
    and ``postprocess`` read the nonexistent ``config.idalabel`` instead of
    ``config.id2label``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Route call-time kwargs to the preprocess / postprocess stages."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        """Answer *question* about *image*; also accepts dict / list-of-dict inputs."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Already in {"image": ..., "question": ...} form (or a batch of them).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and extract image features into one model-input dict."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the preprocessed inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits into the top-k ``{"score", "answer"}`` records."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            # Multi-label setup: per-answer sigmoid scores, first batch element.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 148 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE ( __a ):
    # Test-only stub config registering a fake "new-model" type.
    # NOTE(review): the base name `__a` is undefined in this module — upstream
    # this stub subclasses a config class (e.g. BertConfig); confirm the
    # intended base before running.
    __SCREAMING_SNAKE_CASE :int = "new-model"


if is_tf_available():

    class _SCREAMING_SNAKE_CASE ( __a ):
        # TF model stub tied to the stub config above.
        # NOTE(review): reuses the class name of the config stub (shadowing it)
        # and references `NewModelConfig`, which is not defined under that name
        # here — confirm the intended names.
        __SCREAMING_SNAKE_CASE :str = NewModelConfig
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration tests for the TF auto-model API.

    Covers ``from_pretrained`` loading for each task head, ``from_config`` with an
    edited ``architectures`` list, dynamic registration of a new model type, error
    reporting for invalid identifiers, and hub request caching.

    NOTE(review): this file appears machine-transformed -- locals were all renamed to
    ``__magic_name__`` and most call arguments to ``a__``, which is never bound, so
    these bodies would raise NameError as written; restore the real variable names
    before running.
    """

    # Plain TFAutoModel loading from a hub checkpoint name.
    @slow
    def snake_case__ ( self : Any ):
        __magic_name__ = '''bert-base-cased'''
        __magic_name__ = AutoConfig.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )
        __magic_name__ = TFAutoModel.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )

    # TFAutoModelForPreTraining loading.
    @slow
    def snake_case__ ( self : Optional[Any] ):
        __magic_name__ = '''bert-base-cased'''
        __magic_name__ = AutoConfig.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )
        __magic_name__ = TFAutoModelForPreTraining.from_pretrained(a__ )
        self.assertIsNotNone(a__ )
        self.assertIsInstance(a__ , a__ )

    # TFAutoModelForCausalLM loading, with and without output_loading_info.
    @slow
    def snake_case__ ( self : Optional[int] ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForCausalLM.from_pretrained(a__ )
            __magic_name__ , __magic_name__ = TFAutoModelForCausalLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # TFAutoModelWithLMHead loading.
    @slow
    def snake_case__ ( self : Any ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelWithLMHead.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # TFAutoModelForMaskedLM loading, with and without output_loading_info.
    @slow
    def snake_case__ ( self : str ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForMaskedLM.from_pretrained(a__ )
            __magic_name__ , __magic_name__ = TFAutoModelForMaskedLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # Seq2seq (T5) loading, with and without output_loading_info.
    @slow
    def snake_case__ ( self : Any ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForSeqaSeqLM.from_pretrained(a__ )
            __magic_name__ , __magic_name__ = TFAutoModelForSeqaSeqLM.from_pretrained(a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # TFAutoModelForSequenceClassification loading.
    @slow
    def snake_case__ ( self : Tuple ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForSequenceClassification.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # TFAutoModelForQuestionAnswering loading.
    @slow
    def snake_case__ ( self : List[Any] ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForQuestionAnswering.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # Table QA (TAPAS) loading; needs tensorflow_probability at runtime.
    @slow
    @require_tensorflow_probability
    def snake_case__ ( self : Optional[Any] ):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            __magic_name__ = AutoConfig.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )
            __magic_name__ = TFAutoModelForTableQuestionAnswering.from_pretrained(a__ )
            __magic_name__ , __magic_name__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
                a__ , output_loading_info=a__ )
            self.assertIsNotNone(a__ )
            self.assertIsInstance(a__ , a__ )

    # Parameter counting on a tiny LM-head model (total and trainable-only).
    def snake_case__ ( self : str ):
        __magic_name__ = TFAutoModelWithLMHead.from_pretrained(a__ )
        self.assertIsInstance(a__ , a__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )

    # Same parameter-count check again (presumably the from_pt variant in the original file).
    def snake_case__ ( self : str ):
        __magic_name__ = TFAutoModelWithLMHead.from_pretrained(a__ )
        self.assertIsInstance(a__ , a__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )

    # from_config with an edited ``architectures`` list, then a save/reload round-trip.
    def snake_case__ ( self : Union[str, Any] ):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        __magic_name__ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(a__ , a__ )
        __magic_name__ = copy.deepcopy(model.config )
        __magic_name__ = ['''FunnelBaseModel''']
        __magic_name__ = TFAutoModel.from_config(a__ )
        self.assertIsInstance(a__ , a__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(a__ )
            __magic_name__ = TFAutoModel.from_pretrained(a__ )
            self.assertIsInstance(a__ , a__ )

    # Register a new model type with every TF auto class, exercise it end-to-end,
    # then scrub the global mappings so other tests are unaffected.
    def snake_case__ ( self : Tuple ):
        try:
            AutoConfig.register('''new-model''' , a__ )
            __magic_name__ = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(a__ ):
                        auto_class.register(a__ , a__ )
                    auto_class.register(a__ , a__ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(a__ ):
                        auto_class.register(a__ , a__ )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    __magic_name__ = BertModelTester(self ).get_config()
                    __magic_name__ = NewModelConfig(**tiny_config.to_dict() )
                    __magic_name__ = auto_class.from_config(a__ )
                    self.assertIsInstance(a__ , a__ )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(a__ )
                        __magic_name__ = auto_class.from_pretrained(a__ )
                        self.assertIsInstance(a__ , a__ )
        finally:
            # Clean-up runs even on failure so registration does not leak between tests.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    # Unknown repo id raises with a helpful message.
    def snake_case__ ( self : Optional[int] ):
        with self.assertRaisesRegex(
            a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            __magic_name__ = TFAutoModel.from_pretrained('''bert-base''' )

    # Unknown revision raises with a helpful message.
    def snake_case__ ( self : List[Any] ):
        with self.assertRaisesRegex(
            a__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            __magic_name__ = TFAutoModel.from_pretrained(a__ , revision='''aaaaaa''' )

    # Repo without a TF/PT weight file raises with a helpful message.
    def snake_case__ ( self : int ):
        with self.assertRaisesRegex(
            a__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            __magic_name__ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    # PT-only checkpoint loaded without from_pt=True raises with a helpful message.
    def snake_case__ ( self : Any ):
        with self.assertRaisesRegex(a__ , '''Use `from_pt=True` to load this model''' ):
            __magic_name__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    # A second load of a cached checkpoint should only issue HEAD requests.
    def snake_case__ ( self : Union[str, Any] ):
        # Make sure we have cached the model.
        __magic_name__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            __magic_name__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        __magic_name__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            __magic_name__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 98 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Pipeline tests for ``text2text-generation`` (encoder-decoder generation).

    NOTE(review): machine-transformed -- call arguments were renamed to the unbound
    ``a__``, locals to ``__magic_name__``, and the first method below declares three
    parameters all named ``a__`` (a duplicate-argument SyntaxError). Restore the
    real names before running.
    """

    # Model mappings the common pipeline-test machinery iterates over.
    __SCREAMING_SNAKE_CASE :List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __SCREAMING_SNAKE_CASE :Any = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    # Builds the pipeline under test plus example prompts for the common harness.
    def snake_case__ ( self : Tuple , a__ : Tuple , a__ : int , a__ : int ):
        __magic_name__ = TextaTextGenerationPipeline(model=a__ , tokenizer=a__ )
        return generator, ["Something to write", "Something else"]

    # Generic run: single prompt, num_return_sequences, batching, and a bad input type.
    def snake_case__ ( self : List[str] , a__ : List[Any] , a__ : List[str] ):
        __magic_name__ = generator('''Something there''' )
        self.assertEqual(a__ , [{'''generated_text''': ANY(a__ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
        __magic_name__ = generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=a__ )
        self.assertEqual(
            a__ , [
                [{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
                [{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
            ] , )
        __magic_name__ = generator(
            ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
        self.assertEqual(
            a__ , [
                [{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
                [{'''generated_text''': ANY(a__ )}, {'''generated_text''': ANY(a__ )}],
            ] , )
        with self.assertRaises(a__ ):
            generator(4 )

    # PyTorch small model: deterministic output, beam search, tensor returns, padded batching.
    @require_torch
    def snake_case__ ( self : Any ):
        __magic_name__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''pt''' )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator('''Something there''' , do_sample=a__ )
        self.assertEqual(a__ , [{'''generated_text''': ''''''}] )
        __magic_name__ = 3
        __magic_name__ = generator(
            '''Something there''' , num_return_sequences=a__ , num_beams=a__ , )
        __magic_name__ = [
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
            {'''generated_text''': ''''''},
        ]
        self.assertEqual(a__ , a__ )
        __magic_name__ = generator('''This is a test''' , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
        self.assertEqual(
            a__ , [
                {'''generated_token_ids''': ANY(torch.Tensor )},
                {'''generated_token_ids''': ANY(torch.Tensor )},
            ] , )
        __magic_name__ = generator.model.config.eos_token_id
        __magic_name__ = '''<pad>'''
        __magic_name__ = generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
        self.assertEqual(
            a__ , [
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
                [
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                    {'''generated_token_ids''': ANY(torch.Tensor )},
                ],
            ] , )

    # TensorFlow small model: deterministic output only.
    @require_tf
    def snake_case__ ( self : int ):
        __magic_name__ = pipeline('''text2text-generation''' , model='''patrickvonplaten/t5-tiny-random''' , framework='''tf''' )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator('''Something there''' , do_sample=a__ )
        self.assertEqual(a__ , [{'''generated_text''': ''''''}] )
| 98 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
    """Check whether side lengths can form a polygon (generalized triangle inequality).

    A valid polygon requires the longest side to be strictly shorter than the sum
    of all other sides.

    :param __lowercase: sequence of side lengths (all strictly positive)
    :return: True if the sides satisfy the polygon inequality, False otherwise
    :raises ValueError: for fewer than 3 sides, or any non-positive side length
    """
    # Fewer than 3 sides can never form a polygon; the original condition (< 2)
    # contradicted this error message by letting digons (2 sides) through.
    if len(__lowercase ) < 3:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    # Bug fix: the body referenced the unbound name `nums`; the parameter is `__lowercase`.
    if any(i <= 0 for i in __lowercase ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = __lowercase.copy()
    copy_nums.sort()
    # Longest side must be strictly less than the sum of the remaining sides.
    return copy_nums[-1] < sum(copy_nums[:-1] )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 319 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase , length = 0 ) -> list:
    """Sort a list in place with a recursive bubble sort and return it.

    Each pass bubbles the largest remaining element to position ``length - 1``;
    recursion stops early once a pass performs no swap.

    :param __lowercase: list of comparable items (mutated in place)
    :param length: number of leading elements still unsorted; 0 means the whole list
    :return: the same list, sorted ascending

    Bug fixes vs. the original: both parameters were named ``__lowercase``
    (a SyntaxError), the body referenced the unbound names ``list_data`` and
    ``swapped``, and the recursive call targeted the undefined name ``bubble_sort``.
    """
    length = length or len(__lowercase )
    swapped = False
    for i in range(length - 1 ):
        if __lowercase[i] > __lowercase[i + 1]:
            __lowercase[i], __lowercase[i + 1] = __lowercase[i + 1], __lowercase[i]
            swapped = True
    # Recurse on a shorter prefix only if this pass moved anything.
    return __lowercase if not swapped else SCREAMING_SNAKE_CASE(__lowercase , length - 1 )
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 319 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the Falcon subpackage: only names listed here are
# materialized, on first attribute access, via _LazyModule.
# NOTE(review): this block looks machine-transformed -- the dict and the list
# below are assigned to the SAME name (`lowerCAmelCase__`), so the second
# assignment clobbers the first, and the final _LazyModule call passes the
# undefined name `_import_structure`. In upstream transformers these are
# `_import_structure = {...}` and `_import_structure["modeling_falcon"] = [...]`;
# restore those names before use.
lowerCAmelCase__ = {
    '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the modeling classes from the lazy structure.
    pass
else:
    lowerCAmelCase__ = [
        '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FalconForCausalLM''',
        '''FalconModel''',
        '''FalconPreTrainedModel''',
        '''FalconForSequenceClassification''',
        '''FalconForTokenClassification''',
        '''FalconForQuestionAnswering''',
    ]
# Under static type checking, import everything eagerly so checkers see real symbols.
if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys
    # Replace this module in sys.modules with a lazy proxy.
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
# NOTE(review): this reuses the same (obfuscated) name as the logger above and so
# clobbers it -- presumably `logger` and `FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP`
# in the original; restore distinct names before use.
lowerCAmelCase__ = {
    '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
    '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
    """Configuration for the Falcon model family (mirrors ``transformers.FalconConfig``).

    Stores the hyper-parameters used to instantiate a Falcon model; serialization
    and loading come from the (project-defined) ``PretrainedConfig`` base class.

    Fixes vs. the original chunk: every ``__init__`` parameter was named ``__a``
    (a duplicate-argument SyntaxError), the ``self.`` prefix was dropped from all
    attribute assignments, and the two class attributes / two properties shared
    one name so the second silently shadowed the first. Names restored to match
    upstream ``FalconConfig``.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ) -> None:
        """Build a Falcon configuration; unknown kwargs are forwarded to the base class.

        :param num_kv_heads: key/value heads for grouped-query attention; defaults
            to ``num_attention_heads`` (full multi-head) when ``None``.
        :param alibi: use ALiBi position biases instead of rotary embeddings.
        """
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimension of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Whether rotary position embeddings are used (mutually exclusive with ALiBi)."""
        return not self.alibi
| 244 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT subpackage: names are grouped by the
# submodule they live in and materialized on first access via _LazyModule.
# NOTE(review): this block looks machine-transformed -- every assignment below
# targets the same name `__A`, so each list clobbers the previous value, and the
# final _LazyModule call passes the undefined name `_import_structure`. Upstream
# these are `_import_structure = {...}` plus keyed additions; restore before use.
__A = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = ["DistilBertTokenizerFast"]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
# Under static type checking, import everything eagerly so checkers see real symbols.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys
    # Replace this module in sys.modules with a lazy proxy.
    __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( bpayload , sampling_rate ):
    """Decode an in-memory audio file to a mono float32 waveform using ffmpeg.

    :param bpayload: raw bytes of an audio file in any format ffmpeg understands
    :param sampling_rate: output sampling rate in Hz (ffmpeg resamples to this)
    :return: 1-D ``np.float32`` array of samples
    :raises ValueError: if ffmpeg is not installed or produced no output

    Bug fixes vs. the original: both parameters were named ``a__`` (a SyntaxError),
    the body referenced the unbound names ``sampling_rate``/``ac``/``ar``, and the
    dtype was the nonexistent ``np.floataa`` (should be ``np.float32``).
    """
    ar = f"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        # Feed the payload on stdin, read decoded raw f32le samples from stdout.
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
def UpperCAmelCase_( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    """Stream raw audio chunks from the default system microphone via ffmpeg.

    :param sampling_rate: capture sampling rate in Hz
    :param chunk_length_s: length of each yielded chunk, in seconds
    :param format_for_conversion: raw sample format, ``"s16le"`` or ``"f32le"``
    :yield: raw byte chunks of ``sampling_rate * chunk_length_s`` samples
    :raises ValueError: for an unhandled sample format

    Bug fixes vs. the original: duplicate ``a__`` parameters (SyntaxError) and
    unbound locals (``ar``, ``format_``, ``input_``, ...).
    """
    ar = f"""{sampling_rate}"""
    ac = '''1'''
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    # Pick the platform-specific ffmpeg capture backend and device.
    system = platform.system()
    if system == "Linux":
        format_ = '''alsa'''
        input_ = '''default'''
    elif system == "Darwin":
        format_ = '''avfoundation'''
        input_ = ''':0'''
    elif system == "Windows":
        format_ = '''dshow'''
        input_ = '''default'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-f''',
        format_,
        '''-i''',
        input_,
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-fflags''',
        '''nobuffer''',
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    # NOTE(review): `_ffmpeg_stream` is not defined under that name in this chunk --
    # the def below was renamed by a mechanical transform; restore its real name.
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def UpperCAmelCase_( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    """Yield overlapping microphone audio chunks as numpy arrays for streaming ASR.

    :param sampling_rate: capture sampling rate in Hz
    :param chunk_length_s: logical chunk length in seconds
    :param stream_chunk_s: if set, how often partial chunks are emitted
    :param stride_length_s: left/right overlap in seconds (scalar or 2-list);
        defaults to ``chunk_length_s / 6``
    :param format_for_conversion: raw sample format, ``"s16le"`` or ``"f32le"``
    :yield: dicts with ``raw`` (np array), ``stride`` (samples), ``sampling_rate``
    :raises ValueError: for an unhandled sample format

    Bug fixes vs. the original: duplicate ``a__`` parameters (SyntaxError), unbound
    locals, the nonexistent dtypes ``np.intaa``/``np.floataa`` (``np.int16``/
    ``np.float32``), and item-key assignments collapsed to plain names.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    # NOTE(review): `ffmpeg_microphone` and `chunk_bytes_iter` are not defined under
    # those names in this chunk -- the sibling defs were renamed by a mechanical
    # transform; restore their real names before use.
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['''raw'''] = np.frombuffer(item['''raw'''] , dtype=dtype )
        item['''stride'''] = (
            item['''stride'''][0] // size_of_sample,
            item['''stride'''][1] // size_of_sample,
        )
        item['''sampling_rate'''] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def UpperCAmelCase_( iterator , chunk_len , stride , stream = False ):
    """Re-chunk a byte stream into overlapping windows of ``chunk_len`` bytes.

    :param iterator: iterable of byte chunks of arbitrary size
    :param chunk_len: size of each emitted window, in bytes
    :param stride: (left, right) overlap in bytes; their sum must be < chunk_len
    :param stream: if True, also emit partially-filled windows flagged ``partial``
    :yield: dicts with ``raw`` bytes and the effective ``stride`` of that window
    :raises ValueError: if the strides do not leave room for fresh data

    Bug fixes vs. the original: all four parameters were named ``a__`` (a
    SyntaxError) and the body's locals (``acc``, ``stride_left``, ...) were unbound.
    """
    acc = b''''''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    # The first window has no left context yet.
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'''raw''': acc[:chunk_len], '''stride''': stride}
                if stream:
                    item['''partial'''] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next window.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'''raw''': acc, '''stride''': (_stride_left, 0)}
        if stream:
            item['''partial'''] = False
        yield item
def UpperCAmelCase_( ffmpeg_command , buflen ):
    """Run a command and yield its stdout in ``buflen``-byte reads until EOF.

    :param ffmpeg_command: argv list for the subprocess (typically ffmpeg)
    :param buflen: number of bytes to request per read
    :yield: successive stdout byte chunks (the final one may be shorter)
    :raises ValueError: if the executable is not installed

    Bug fixes vs. the original: both parameters were named ``a__`` (a SyntaxError)
    and the body referenced the unbound names ``bufsize``/``raw``.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 313 | 0 |
'''simple docstring'''
from functools import lru_cache
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = 2
_snake_case = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.add(_SCREAMING_SNAKE_CASE )
return factors
# Memoized count of distinct prime factors.
# NOTE(review): `unique_prime_factors` is not defined in this chunk -- the helper
# above was renamed to `__SCREAMING_SNAKE_CASE` by a mechanical transform, so this
# call raises NameError at runtime; restore the helper's real name before use.
@lru_cache
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    """Return the number of unique prime factors of the argument (cached)."""
    return len(unique_prime_factors(_SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
return len(set(_SCREAMING_SNAKE_CASE ) ) in (0, 1)
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
    """Find the first run of ``n`` consecutive integers that each have exactly
    ``n`` distinct prime factors (Project Euler 47 style search).

    :param _SCREAMING_SNAKE_CASE: required run length / factor count ``n``
    :return: the list of the ``n`` consecutive integers

    Bug fixes vs. the original: the locals ``base``, ``group`` and ``checker``
    were unbound (collapsed to one name), and the comprehension passed the
    function parameter to ``upf_len`` instead of each group member ``x``.
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(_SCREAMING_SNAKE_CASE )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        # NOTE(review): `upf_len` and `equality` are not defined under those names in
        # this chunk (siblings were renamed by a mechanical transform); restore them.
        checker = [upf_len(x ) for x in group]
        checker.append(_SCREAMING_SNAKE_CASE )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 4 ):
    """Return the first of ``n`` consecutive integers with ``n`` distinct prime
    factors, or ``None`` if the search produced nothing.

    Bug fix vs. the original: the body referenced the unbound name ``results``.
    """
    # NOTE(review): `run` is not defined under that name in this chunk (the sibling
    # def was renamed by a mechanical transform); restore its real name before use.
    results = run(_SCREAMING_SNAKE_CASE )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution()) | 270 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCAmelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _lowerCAmelCase :
    """Test harness that builds tiny Autoformer configs and synthetic inputs, and
    checks that the standalone encoder/decoder match the full model's outputs.

    NOTE(review): machine-transformed -- ``__init__``'s parameters were all renamed
    to ``UpperCAmelCase`` (a duplicate-argument SyntaxError) and locals to
    ``_snake_case``, so names like ``d_model``, ``parent`` and ``config`` are
    unbound as written. The structure mirrors ``AutoformerModelTester`` in the
    transformers test suite; restore the real names before running.
    """

    # Records the tiny-model hyper-parameters used by the builders below.
    def __init__(self , UpperCAmelCase , UpperCAmelCase=16 , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=14 , UpperCAmelCase=10 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 2, 3, 4, 5] , UpperCAmelCase=25 , UpperCAmelCase=5 , ) -> int:
        _snake_case = d_model
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = prediction_length
        _snake_case = context_length
        _snake_case = cardinality
        _snake_case = num_time_features
        _snake_case = lags_sequence
        _snake_case = embedding_dimension
        _snake_case = is_training
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = context_length
        # Autoformer's decoder sequence covers prediction_length plus the label window.
        _snake_case = prediction_length + label_length
        _snake_case = label_length
        _snake_case = moving_average
        _snake_case = autocorrelation_factor

    # Builds a tiny AutoformerConfig from the stored hyper-parameters.
    def lowercase (self ) -> str:
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    # Builds a dict of random past/future tensors matching the config's shapes.
    def lowercase (self , UpperCAmelCase ) -> Tuple:
        # Past window must be long enough to extract every configured lag.
        _snake_case = config.context_length + max(config.lags_sequence )
        _snake_case = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        _snake_case = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        _snake_case = floats_tensor([self.batch_size, _past_length] )
        _snake_case = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        _snake_case = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        _snake_case = floats_tensor([self.batch_size, config.prediction_length] )
        _snake_case = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    # Convenience: config plus matching inputs.
    def lowercase (self ) -> int:
        _snake_case = self.get_config()
        _snake_case = self.prepare_autoformer_inputs_dict(UpperCAmelCase )
        return config, inputs_dict

    # Same pair, under the name the common test mixin expects.
    def lowercase (self ) -> List[Any]:
        _snake_case, _snake_case = self.prepare_config_and_inputs()
        return config, inputs_dict

    # Saves/reloads the encoder and decoder separately and asserts their outputs
    # match the full model's hidden states to ~1e-3.
    def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
        _snake_case = AutoformerModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
        _snake_case = model(**UpperCAmelCase )
        _snake_case = outputs.encoder_last_hidden_state
        _snake_case = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            _snake_case = model.get_encoder()
            encoder.save_pretrained(UpperCAmelCase )
            _snake_case = AutoformerEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
        _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = model.create_network_inputs(**UpperCAmelCase )
        # Decompose the context window into seasonal and trend components.
        _snake_case, _snake_case = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        _snake_case = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        _snake_case = encoder(inputs_embeds=UpperCAmelCase )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        # Build the decoder's seasonal/trend inputs: context mean repeated over the
        # prediction horizon, zeros for the unknown seasonal part.
        _snake_case = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        _snake_case = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        _snake_case = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        _snake_case = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _snake_case = model.get_decoder()
            decoder.save_pretrained(UpperCAmelCase )
            _snake_case = AutoformerDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
        _snake_case = decoder(
            trend=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase_ = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase_ = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> List[Any]:
_snake_case = AutoformerModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowercase (self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase (self ) -> Any:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase )
_snake_case, _snake_case = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def lowercase (self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowercase (self ) -> Tuple:
pass
def lowercase (self ) -> Any:
_snake_case = inspect.signature(getattr(UpperCAmelCase , """forward""" ) )
# The main input is the name of the argument after `self`
_snake_case = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase )
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , """seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """decoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """encoder_seq_length""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """d_model""" , UpperCAmelCase )
_snake_case = getattr(self.model_tester , """num_attention_heads""" , UpperCAmelCase )
_snake_case = d_model // num_attention_heads
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_snake_case = len(UpperCAmelCase )
_snake_case = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# decoder attentions
_snake_case = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_snake_case = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase ) )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase (self ) -> List[Any]:
super().test_retain_grad_hidden_states_attentions()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE="train-batch.pt" ):
_snake_case = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
_snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Union[str, Any]:
_snake_case = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch()
with torch.no_grad():
_snake_case = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_snake_case = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> str:
_snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_snake_case = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_snake_case = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> Optional[int]:
_snake_case = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCAmelCase )
_snake_case = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_snake_case = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_snake_case = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCAmelCase )
_snake_case = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCAmelCase )
_snake_case = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase , rtol=1e-1 ) ) | 270 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = MgpstrTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = {}
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCAmelCase : Union[str, Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
def snake_case__ ( self, **__a):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = "tester"
_lowerCAmelCase : List[str] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__a)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
_lowerCAmelCase : str = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
_lowerCAmelCase : List[str] = tokenizer.encode([special_token], add_special_tokens=__a)
self.assertEqual(len(__a), 1)
_lowerCAmelCase : Optional[Any] = tokenizer.decode(__a, skip_special_tokens=__a)
self.assertTrue(special_token not in decoded)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_input_output_texts(__a)
_lowerCAmelCase : int = tokenizer.tokenize(__a)
_lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(__a)
_lowerCAmelCase : Dict = tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__a)
self.assertNotEqual(len(__a), 0)
_lowerCAmelCase : List[str] = tokenizer.decode(__a)
self.assertIsInstance(__a, __a)
self.assertEqual(text_a.replace(" ", ""), __a)
@unittest.skip("MGP-STR tokenizer only handles one sequence.")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
def snake_case__ ( self):
'''simple docstring'''
pass
| 36 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_snake_case = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = mock.Mock()
_lowerCAmelCase : int = 500
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : str = HTTPError
_lowerCAmelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request", return_value=__a) as mock_head:
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
def snake_case__ ( self):
'''simple docstring'''
with self.assertRaises(__a):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
_lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
self.assertIsNotNone(__a)
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = TOKEN
HfFolder.save_token(__a)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
_lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
_lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
_lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 36 | 1 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=lowerCAmelCase ):
_a : Optional[Any]= ["transformers", "torch", "note_seq"]
def __init__( self ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(self ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ,*snake_case ,**snake_case ):
'''simple docstring'''
requires_backends(cls ,["""transformers""", """torch""", """note_seq"""] )
| 364 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowercase : Tuple = get_logger(__name__)
lowercase : Optional[int] = Path(__file__).parent / """model_card_template.md"""
lowercase : Dict = uuida().hex
lowercase : Tuple = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase : str = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
lowercase : Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def _snake_case( SCREAMING_SNAKE_CASE__ = None ) -> str:
lowercase : str = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> Dict:
if token is None:
lowercase : Optional[int] = HfFolder.get_token()
if organization is None:
lowercase : int = whoami(SCREAMING_SNAKE_CASE__ )["""name"""]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
lowercase : str = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""" ) else None
lowercase : int = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
lowercase : str = os.path.join(args.output_dir , """README.md""" )
model_card.save(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Optional[Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
lowercase : List[Any] = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
lowercase : Any = re.search(R"""snapshots/([^/]+)/""" , SCREAMING_SNAKE_CASE__ )
if search is None:
return None
lowercase : List[Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowercase : Optional[Any] = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
lowercase : Optional[int] = os.path.join(hf_cache_home, """diffusers""")
def _snake_case( SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> None:
if new_cache_dir is None:
lowercase : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
lowercase : List[str] = old_diffusers_cache
lowercase : Dict = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
lowercase : int = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowercase : Any = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
try:
os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase : Dict = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowercase : Any = 0
else:
with open(cache_version_file) as f:
try:
lowercase : List[Any] = int(f.read())
except ValueError:
lowercase : int = 0
if cache_version < 1:
lowercase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase : int = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> str:
if variant is not None:
lowercase : List[str] = weights_name.split(""".""" )
lowercase : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
lowercase : int = """.""".join(SCREAMING_SNAKE_CASE__ )
return weights_name
def _snake_case( SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]:
lowercase : Optional[int] = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
lowercase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
lowercase : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
lowercase : Any = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}' so that the correct variant file can be added." , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
lowercase : int = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 285 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Remap an LDM/CompVis VAE state dict onto diffusers' ``AutoencoderKL`` layout.

    Args:
        checkpoint: the original VAE ``state_dict`` (keys like ``encoder.down.0...``).
        config: the diffusers VAE config dict, forwarded to ``assign_to_checkpoint``
            so attention weights can be reshaped correctly.

    Returns:
        A new state dict whose keys match ``AutoencoderKL``'s parameter names.
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}

    # Stem / head convolutions and norms map 1:1, only the key names change.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        # Downsamplers are popped out of the source dict so the generic resnet
        # renaming below never sees them.
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid block: two resnets followed by one attention block.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # Decoder blocks are stored in reverse order in the LDM checkpoint.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid block, same structure as the encoder's.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


# Backward-compatible binding for the previous (obfuscated) name.
_lowerCAmelCase = custom_convert_ldm_vae_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable Diffusion v1 ``VAE.pt`` (or ``.safetensors``) file to diffusers format.

    Args:
        checkpoint_path: path to the original VAE checkpoint.
        output_path: directory where the converted ``AutoencoderKL`` is saved.
    """
    # Only support V1
    # NOTE(review): leading space in the URL is preserved from the original;
    # requests strips surrounding whitespace, but confirm if this is changed.
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        # safetensors files are read tensor-by-tensor into a plain dict.
        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


# Backward-compatible binding for the previous (obfuscated) name.
_lowerCAmelCase = vae_pt_to_vae_diffuser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    # Fixed copy-pasted help text: this flag is the *output* directory, not the input checkpoint.
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted model.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback stub so module-level references to `Image` still resolve when
    # Pillow is not installed; the vision-gated tests are skipped in that case.
    class Image:
        """Minimal stand-in for `PIL.Image` providing a no-op `open`."""

        @staticmethod
        def open(*args, **kwargs):
            # Intentionally does nothing: only keeps `Image.open(...)` callable.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Tests for the object-detection pipeline.

    Method names restore the upstream ones: the common pipeline-test mixin
    calls `get_test_pipeline`/`run_pipeline_test`, and pytest only discovers
    methods named `test_*`.
    """

    # Mapping consumed by the shared pipeline-test harness.
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus example inputs for the shared harness."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Smoke-test single and batched inference with a zero threshold."""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)

        # Only the two highest-scoring detections survive the threshold.
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Type markers used in HfArgumentParser annotations; both were previously
# bound to the same obfuscated name, leaving `DataClassType` (referenced
# below) undefined.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
__SCREAMING_SNAKE_CASE = DataClassType  # backward-compatible binding for the old name
def string_to_bool(v):
    """Parse a command-line truthy/falsy string into a bool.

    Actual bools pass through unchanged; anything else must be one of the
    accepted yes/no spellings (case insensitive).

    Raises:
        ArgumentTypeError: if the string is not a recognized boolean value.
    """
    # Fixed: the original checked `isinstance(v, v)` instead of `isinstance(v, bool)`.
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."""
        )


_a = string_to_bool  # backward-compatible binding for the old name
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return an argparse `type` function mapping the string form of each
    choice back to the original value (unknown strings pass through)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


_a = make_choice_type_function  # backward-compatible binding for the old name
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """Convenience wrapper around `dataclasses.field` that stores argparse
    extras (`aliases`, `help`) in the field metadata.

    The original definition reused one placeholder for every keyword-only
    parameter, which is a SyntaxError (duplicate argument names).
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


_a = HfArg  # backward-compatible binding for the old name
class __A (ArgumentParser):
    """`argparse.ArgumentParser` subclass that derives CLI arguments from
    dataclass type hints (this is transformers' `HfArgumentParser`; the class
    keeps its local name `__A` for compatibility).

    The original base class name was undefined; the body's use of
    `add_argument_group`, `parse_known_args` and `formatter_class` shows the
    base must be `argparse.ArgumentParser`.
    """

    # Dataclass types this parser knows how to populate (string annotation so
    # the class is importable even if DataClassType is defined elsewhere).
    dataclass_types: "Iterable[DataClassType]"

    def __init__(self, dataclass_types, **kwargs):
        """
        Args:
            dataclass_types: one dataclass type, or an iterable of them, whose
                fields become command-line arguments.
            **kwargs: forwarded to `argparse.ArgumentParser.__init__`.
        """
        # Show field defaults in --help unless the caller picked a formatter.
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser, field):
        """Register a single dataclass `field` as an argument on `parser`."""
        long_option = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(long_option, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype):
        """Add one argument per init-able field of the dataclass `dtype`."""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            # Replace the (possibly string) annotation by its resolved type.
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        """Parse command-line args (or `args`) into instances of the registered
        dataclasses.

        Optionally reads extra arguments from ``.args`` files (via
        `args_filename`, `args_file_flag`, or a file named after `sys.argv[0]`);
        command-line args always take precedence over file contents.

        Returns:
            A tuple with one instance per dataclass type, plus (optionally) the
            leftover namespace and/or the remaining unparsed strings.
        """
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args, allow_extra_keys=False):
        """Instantiate the registered dataclasses from a plain dict.

        Raises:
            ValueError: if `args` has keys no dataclass consumes and
                `allow_extra_keys` is False.
        """
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file, allow_extra_keys=False):
        """Load a JSON file and feed its top-level object to `parse_dict`."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        """Load a YAML file and feed its top-level mapping to `parse_dict`."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list below is bound to `SCREAMING_SNAKE_CASE_`, yet every
# append targets `rename_keys`, which is never defined in this file —
# presumably both were the same `rename_keys` list before an automated
# rename; unify the names, otherwise the loop raises NameError on import.
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
        ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
        ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
        ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
        ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
        ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
        ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
        ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
        ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
        ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
    ]
)
def lowerCAmelCase_(state_dict: dict, old_key, new_key) -> None:
    """Move ``state_dict[old_key]`` to ``new_key``, mutating the dict in place.

    Args:
        state_dict: mapping of parameter names to tensors.
        old_key: key to remove (raises KeyError if absent).
        new_key: key under which the value is re-inserted.
    """
    # NOTE: the original declared all three parameters with the same name
    # (a SyntaxError); restored to distinct names.
    val = state_dict.pop(old_key)
    state_dict[new_key] = val
def lowerCAmelCase_(snake_case_):
    """Return a copy of the state dict with torchvision backbone keys renamed.

    Keys containing ``backbone.0.body`` are rewritten to the HF
    ``backbone.conv_encoder.model`` layout; all other keys are kept as-is.
    """
    # NOTE: the original loop discarded both the renamed key and the value,
    # then returned an undefined `new_state_dict`; the accumulation is restored.
    new_state_dict = OrderedDict()
    for key, value in snake_case_.items():
        if "backbone.0.body" in key:
            key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
        new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_(state_dict, is_panoptic=False):
    """Split the fused self-attention projections of the 6 encoder layers.

    PyTorch's MultiHeadAttention stores query/key/value as a single
    ``in_proj`` matrix + bias; the HF implementation expects separate
    q/k/v entries. Pops the fused tensors and writes the three slices
    back into ``state_dict`` in place.

    Args:
        state_dict: original checkpoint state dict (mutated in place).
        is_panoptic: if True, keys carry a "conditional_detr." prefix.
    """
    # NOTE: the original declared duplicate parameter names (a SyntaxError)
    # and computed the q/k/v slices without storing them; the write-back of
    # each slice under its HF key is restored.
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict;
        # q/k/v are stacked along dim 0 in chunks of 256 rows
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def lowerCAmelCase_():
    """Download and return the standard COCO val2017 verification image (two cats)."""
    # NOTE: the original referenced an undefined `snake_case_` for both the
    # URL and the `stream` flag; restored to the literal URL and True.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
    """Convert an original ConditionalDETR checkpoint to the HF format.

    NOTE(review): this function is badly damaged by an automated rename —
    (1) both parameters share the name `snake_case_` (a SyntaxError); per the
    argparse block below they were `model_name` and `pytorch_dump_folder_path`;
    (2) nearly every assignment binds to the throwaway `UpperCAmelCase_`, so
    later reads of `model_name`, `is_panoptic`, `idalabel`, `state_dict`,
    `val`, `image_processor`, `conditional_detr`, `model`, `outputs`,
    `original_outputs`, etc. hit undefined names;
    (3) `{int(snake_case_ )}` in the id2label comprehension presumably was
    `int(k)` — confirm against the original conversion script;
    (4) the model is pushed to the Hub BEFORE the numerical verification —
    consider moving `push_to_hub` after the allclose checks.
    """
    UpperCAmelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        UpperCAmelCase_ = "resnet101"
    if "dc5" in model_name:
        UpperCAmelCase_ = True
    UpperCAmelCase_ = "panoptic" in model_name
    if is_panoptic:
        UpperCAmelCase_ = 2_50
    else:
        UpperCAmelCase_ = 91
        UpperCAmelCase_ = "huggingface/label-files"
        UpperCAmelCase_ = "coco-detection-id2label.json"
        UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
        UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
        UpperCAmelCase_ = idalabel
        UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
    UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
    # prepare image
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
    UpperCAmelCase_ = encoding["pixel_values"]
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
    UpperCAmelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            UpperCAmelCase_ = "conditional_detr." + src
        rename_key(snake_case_ , snake_case_ , snake_case_ )
    UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                UpperCAmelCase_ = state_dict.pop(snake_case_ )
                UpperCAmelCase_ = val
    # finally, create HuggingFace model and load state dict
    UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    UpperCAmelCase_ = conditional_detr(snake_case_ )
    UpperCAmelCase_ = model(snake_case_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )
    image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 | '''simple docstring'''
def lowerCAmelCase_(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string.

    Args:
        a: first non-negative integer.
        b: second non-negative integer.

    Returns:
        The OR result formatted like ``bin()`` output, e.g. "0b111001".

    Raises:
        ValueError: if either input is negative.
    """
    # NOTE: the original declared both parameters with the same name
    # (a SyntaxError); restored to distinct names.
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # A result bit is 1 whenever either operand has a 1 in that position.
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__)
# NOTE(review): this second assignment reuses the same global name and
# shadows the logger created above — presumably these were two distinct
# globals (logger / pretrained-config archive map) before an automated
# rename; confirm and give them separate names.
__SCREAMING_SNAKE_CASE :Optional[int] = {
    '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A_ ( PretrainedConfig ):
    """Configuration class for Open-Llama models.

    Stores the hyperparameters that define the model architecture and
    validates the optional RoPE-scaling configuration.
    NOTE(review): the base class was the undefined name ``lowerCAmelCase_``;
    restored to the ``PretrainedConfig`` imported above.
    """

    # NOTE(review): presumably the ``model_type`` attribute before an
    # automated rename — confirm.
    _lowerCamelCase : List[str] = """open-llama"""

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        """Build the config; see the class docstring for field meanings.

        NOTE: the original signature declared every parameter with the same
        name (a SyntaxError); names restored from the attribute assignments
        in the body.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Keep accepting the historical (misspelled) kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        """Validate ``self.rope_scaling``: None, or a dict with a valid
        ``type`` ("linear"/"dynamic") and a float ``factor`` > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                # Fixed: message previously said fields `name`/`factor` (and
                # "with with") although the code reads `type`/`factor`.
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )

    # Backward-compatible alias for the obfuscated public name.
    lowercase = _rope_scaling_validation
| 156 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class A_ ( TypedDict ):
    """Result of a Burrows-Wheeler transform.

    NOTE(review): the base class was the undefined name ``lowerCAmelCase_``;
    restored to the ``TypedDict`` imported above, with field names taken
    from the dict literal built by the transform function below.
    """

    # Last column of the sorted rotation matrix.
    bwt_string: str
    # Index of the original string among the sorted rotations.
    idx_original_string: int
def UpperCAmelCase_(__lowercase: str) -> list[str]:
    """Return all cyclic rotations of the input string, in rotation order.

    Raises:
        TypeError: if the input is not a str.
    """
    # NOTE: the original checked `isinstance(s, s)` and rotated an undefined
    # `s`; both restored to operate on the actual parameter against `str`.
    if not isinstance(__lowercase, str):
        raise TypeError("The parameter s type must be str.")

    return [__lowercase[i:] + __lowercase[:i] for i in range(len(__lowercase))]
def UpperCAmelCase_ ( __lowercase : str ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_UpperCAmelCase = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_UpperCAmelCase = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def UpperCAmelCase_ ( __lowercase : str , __lowercase : int ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_UpperCAmelCase = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_UpperCAmelCase = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_UpperCAmelCase = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Union[str, Any] = '''Provide a string that I will generate its BWT transform: '''
__SCREAMING_SNAKE_CASE :Dict = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE :Optional[int] = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
__SCREAMING_SNAKE_CASE :Optional[int] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
| 156 | 1 |
from __future__ import annotations
class __lowerCAmelCase :
    """Binary tree node holding a value and left/right child links."""

    def __init__( self , _snake_case ):
        """Store the node value; both children start empty.

        NOTE: the original assigned the value and the two Nones to a local
        variable instead of `self`; the traversal helpers below read
        `.data`/`.left`/`.right`, so the instance attributes are restored.
        """
        self.data = _snake_case
        self.left = None
        self.right = None
def _UpperCAmelCase ( snake_case ):  # In Order traversal of the tree
    """Print node data in-order (left subtree, node, right subtree)."""
    # NOTE: the original body read an undefined `tree` and recursed via the
    # pre-rename name `display`; both restored to the actual parameter and
    # this function's own name.
    if snake_case:
        _UpperCAmelCase(snake_case.left)
        print(snake_case.data)
        _UpperCAmelCase(snake_case.right)
def _UpperCAmelCase ( snake_case ):
    """Return the depth of the tree rooted at the given node (0 for None)."""
    # NOTE: the original body read an undefined `tree` and recursed via the
    # pre-rename name `depth_of_tree`; restored to this function's own name.
    return 1 + max(_UpperCAmelCase(snake_case.left), _UpperCAmelCase(snake_case.right)) if snake_case else 0
def _UpperCAmelCase ( snake_case ):
    """Return True if every node in the tree has either 0 or 2 children."""
    # NOTE: the original body read an undefined `tree` and recursed via the
    # pre-rename name `is_full_binary_tree`; restored to this function's name.
    if not snake_case:
        return True
    if snake_case.left and snake_case.right:
        return _UpperCAmelCase(snake_case.left) and _UpperCAmelCase(snake_case.right)
    else:
        return not snake_case.left and not snake_case.right
def _UpperCAmelCase ( ):  # Main function for testing.
    """Build a sample tree and exercise the traversal helpers.

    NOTE(review): damaged by an automated rename — every `Node(...)` result
    is bound to the same local `_lowerCAmelCase`, so the tree is never
    wired together (the originals were presumably `tree`, `tree.left`,
    `tree.right`, ...), and `Node`/`is_full_binary_tree`/`depth_of_tree`/
    `display` are undefined here (the class and helpers above were renamed
    `__lowerCAmelCase`/`_UpperCAmelCase`). Restore the original names.
    """
    _lowerCAmelCase = Node(1 )
    _lowerCAmelCase = Node(2 )
    _lowerCAmelCase = Node(3 )
    _lowerCAmelCase = Node(4 )
    _lowerCAmelCase = Node(5 )
    _lowerCAmelCase = Node(6 )
    _lowerCAmelCase = Node(7 )
    _lowerCAmelCase = Node(8 )
    _lowerCAmelCase = Node(9 )
    print(is_full_binary_tree(snake_case ) )
    print(depth_of_tree(snake_case ) )
    print("""Tree is: """ )
    display(snake_case )
# NOTE(review): `main` is undefined — the entry function above is
# `_UpperCAmelCase` after the rename.
if __name__ == "__main__":
    main()
| 82 | """simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
    """Automatic mask-generation (SAM-style) chunk pipeline.

    Generates segmentation masks for an image by running the model over a
    grid of prompt points, batched via the ChunkPipeline protocol
    (preprocess -> _forward per chunk -> postprocess over all chunks).

    NOTE(review): heavily damaged by an automated rename —
    (1) several method signatures declare duplicate `_UpperCAmelCase`
    parameters (a SyntaxError); the originals were distinct names such as
    (image, points_per_batch, crops_n_layers, crop_overlap_ratio,
    points_per_crop, crop_n_points_downscale_factor);
    (2) in `_sanitize_parameters` the three kwargs dicts and every
    subsequent entry are bound to the same `lowercase__`, and the return
    references undefined `preprocess_kwargs`/`forward_params`/
    `postprocess_kwargs`;
    (3) later methods read `grid_points`, `input_labels`, `crop_boxes`,
    `model_inputs`, `image_embeddings`, `model_outputs`, `masks`,
    `iou_scores`, `boxes`, `all_scores`, `all_masks`, `all_boxes`, none of
    which are defined after the rename;
    (4) `self.check_model_type(_UpperCAmelCase )` presumably received the
    mask-generation model mapping imported above — confirm all of these
    against the original pipeline implementation.
    """

    def __init__( self , **_UpperCAmelCase ):
        """Validate backends (vision + torch) and the model type."""
        super().__init__(**_UpperCAmelCase )
        requires_backends(self , '''vision''' )
        requires_backends(self , '''torch''' )

        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )

        self.check_model_type(_UpperCAmelCase )

    def _snake_case ( self , **_UpperCAmelCase ):
        """Split caller kwargs into preprocess / forward / postprocess dicts."""
        lowercase__: List[Any] = {}
        lowercase__: List[Any] = {}
        lowercase__: Dict = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            lowercase__: Dict = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            lowercase__: Any = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            lowercase__: Union[str, Any] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            lowercase__: Optional[Any] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            lowercase__: Union[str, Any] = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            lowercase__: Any = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            lowercase__: Tuple = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            lowercase__: List[str] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            lowercase__: str = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            lowercase__: List[str] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            lowercase__: Dict = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            lowercase__: int = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
        """Run mask generation on an image (delegates to ChunkPipeline)."""
        return super().__call__(_UpperCAmelCase , *_UpperCAmelCase , num_workers=_UpperCAmelCase , batch_size=_UpperCAmelCase , **_UpperCAmelCase )

    def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase = 0 , _UpperCAmelCase = 512 / 1500 , _UpperCAmelCase = 32 , _UpperCAmelCase = 1 , ):
        """Preprocess: embed the image once, then yield point batches."""
        lowercase__: Union[str, Any] = load_image(_UpperCAmelCase )
        lowercase__: Dict = self.image_processor.size['''longest_edge''']
        lowercase__, lowercase__, lowercase__, lowercase__: Optional[Any] = self.image_processor.generate_crop_boxes(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        lowercase__: List[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )

        with self.device_placement():
            if self.framework == "pt":
                lowercase__: Tuple = self.get_inference_context()
                with inference_context():
                    # compute image embeddings once, reused for every point batch
                    lowercase__: Optional[Any] = self._ensure_tensor_on_device(_UpperCAmelCase , device=self.device )
                    lowercase__: Any = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                    lowercase__: Tuple = image_embeddings

        lowercase__: Optional[Any] = grid_points.shape[1]
        lowercase__: Tuple = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''' )

        for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
            lowercase__: Dict = grid_points[:, i : i + points_per_batch, :, :]
            lowercase__: int = input_labels[:, i : i + points_per_batch]
            lowercase__: Any = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0.88 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , ):
        """Forward one point batch and post-process masks on-device."""
        lowercase__: List[Any] = model_inputs.pop('''input_boxes''' )
        lowercase__: List[Any] = model_inputs.pop('''is_last''' )
        lowercase__: Any = model_inputs.pop('''original_sizes''' ).tolist()
        lowercase__: Union[str, Any] = model_inputs.pop('''reshaped_input_sizes''' ).tolist()

        lowercase__: List[Any] = self.model(**_UpperCAmelCase )

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        lowercase__: int = model_outputs['''pred_masks''']
        lowercase__: str = self.image_processor.post_process_masks(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , binarize=_UpperCAmelCase )
        lowercase__: str = model_outputs['''iou_scores''']
        lowercase__, lowercase__, lowercase__: Optional[int] = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.7 , ):
        """Aggregate all chunk outputs, apply NMS, assemble the final dict."""
        lowercase__: int = []
        lowercase__: str = []
        lowercase__: List[Any] = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores''' ) )
            all_masks.extend(model_output.pop('''masks''' ) )
            all_boxes.append(model_output.pop('''boxes''' ) )

        lowercase__: Any = torch.cat(_UpperCAmelCase )
        lowercase__: Dict = torch.cat(_UpperCAmelCase )
        lowercase__, lowercase__, lowercase__, lowercase__: Any = self.image_processor.post_process_for_mask_generation(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

        lowercase__: Union[str, Any] = defaultdict(_UpperCAmelCase )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(_UpperCAmelCase )

        lowercase__: Any = {}
        if output_rle_mask:
            lowercase__: Optional[Any] = rle_mask

        if output_bboxes_mask:
            lowercase__: Optional[int] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 177 | 0 |
from __future__ import annotations
def snake_case( __magic_name__ ) -> list[int]:
    """Return the prime factorisation of the input in non-decreasing order.

    Repeated trial division: divide out each divisor while it fits, then
    advance; whatever remains above 1 is itself prime.
    """
    factors: list[int] = []
    remaining = __magic_name__
    divisor = 2
    while divisor * divisor <= remaining:
        if remaining % divisor == 0:
            remaining //= divisor
            factors.append(divisor)
        else:
            divisor += 1
    # The leftover cofactor (if any) is a prime larger than sqrt(n).
    if remaining > 1:
        factors.append(remaining)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 116 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCAmelCase_ = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCAmelCase_ = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    """Mean-squared-error metric backed by sklearn's ``mean_squared_error``.

    NOTE(review): the original declared all three methods as ``__a`` (each
    shadowing the previous) while referencing ``self._get_feature_types``;
    the framework hook names ``_info``/``_get_feature_types``/``_compute``
    are restored, along with distinct parameter names (the originals were
    all ``_A``, a SyntaxError).
    """

    def _info( self ):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ] , )

    def _get_feature_types( self ):
        """Feature schema; the 'multilist' config accepts sequences of floats."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''' ) ),
                "references": datasets.Sequence(datasets.Value('''float''' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('''float''' ),
                "references": datasets.Value('''float''' ),
            }

    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """Compute MSE (or RMSE when ``squared`` is False) of predictions vs references."""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    """Builds tiny DeBERTa configs/inputs and runs shape checks for the test class below.

    Method names match the calls made by the model test class
    (`prepare_config_and_inputs`, `create_and_check_deberta_*`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        # NOTE(review): the bare `300` in the original had lost its target;
        # restoring the standard pipeline-test override of vocab_size — confirm.
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # Loss must be a scalar tensor (empty size).
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported call signatures; shapes must all match.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model + pipeline test harness for DeBERTa."""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released microsoft/deberta-base weights."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(
            torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}"
        )
| 141 |
"""simple docstring"""
from __future__ import annotations
__magic_name__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__magic_name__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Return, for each element, the next greater element to its right (-1 if none).

    Brute force: O(n^2) nested index loops.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same contract as next_greatest_element_slow, iterating slices directly.

    Still O(n^2) worst case, but avoids index arithmetic in the inner loop.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based O(n) next-greater-element.

    Walk right-to-left keeping a stack of candidates that are still larger
    than everything processed after them; the stack top is the answer.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Candidates <= the current value can never be the next greater
            # element for anything further left; discard them.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            # Fix: the original dropped this assignment, so `result` stayed all -1.
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__magic_name__ = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 100 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch state-dict file.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        albert_config_file: JSON config describing the pretrained architecture.
        pytorch_dump_path: where to `torch.save` the converted state dict.
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 359 |
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, via 6k±1 trial division up to sqrt(number)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (Project Euler #7; default is the 10001st).

    Raises:
        TypeError: if nth is not castable to int.
        ValueError: if nth is less than one.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 102 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator: tag the wrapped function with a single key it handles.

    Appends *key* to the function's `handle_key` list attribute (creating it
    if absent) so a KeyHandler metaclass can register the function.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Decorator: tag the wrapped function with several keys it handles.

    Extends the function's `handle_key` list attribute with *keys*.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass collecting methods tagged via `mark`/`mark_multiple`.

    Each created class gets a `key_handler` dict mapping key code -> handler
    function, plus a `handle_input` dispatcher. (The original code already
    referenced the name `KeyHandler` internally, so the class must carry it.)
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                # Register every tagged method under each of its keys.
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # NOTE(review): the obfuscated source dropped the assignment target
            # here; `current_selection` follows the upstream pattern — confirm.
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild *cls* through the KeyHandler metaclass so tagged methods get wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 150 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Elementwise logistic sigmoid over a numpy array (the body already used
    the name `_outputs`; the obfuscated parameter name left it undefined)."""
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    """Numerically stable softmax along the last axis.

    Subtracting the row max before exponentiating avoids overflow;
    `keepdims=True` (the original passed the input here by mistake) keeps
    broadcasting shapes aligned.
    """
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    """Post-processing functions a text-classification pipeline may apply to logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
 return_all_scores (`bool`, *optional*, defaults to `False`):
 Whether to return all prediction scores or just the one of the predicted class.
 function_to_apply (`str`, *optional*, defaults to `\"default\"`):
 The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
 - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
 has several labels, will apply the softmax function on the output.
 - `\"sigmoid\"`: Applies the sigmoid function on the output.
 - `\"softmax\"`: Applies the softmax function on the output.
 - `\"none\"`: Does not apply any function on the output.
 """,
)
class TextClassificationPipeline(Pipeline):
    """Text-classification pipeline: tokenize -> model forward -> score post-processing.

    Method names follow the `Pipeline` hook protocol (`_sanitize_parameters`,
    `preprocess`, `_forward`, `postprocess`), which the base class dispatches to.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the text(s) given as inputs; see `Pipeline.__call__` for details."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            # Fix: the lambda previously referenced an undefined name `x` via parameter `a`.
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 214 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
# Named to match the references inside ConvBertTokenizerFast below
# (the obfuscated source bound all five to the same identifier).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ConvBERT tokenizer (backed by the HuggingFace *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def lowerCAmelCase_(input_str: str) -> str:
    """Return *input_str* with the order of its whitespace-separated words reversed.

    >>> lowerCAmelCase_("Hey wollef sroirraw")
    'sroirraw wollef Hey'
    """
    # Fix: the body referenced `input_str` but the parameter was named differently.
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 143 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        config_file: JSON config describing the pretrained architecture.
        pytorch_dump_path: directory to `save_pretrained` the converted model into.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 19 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __magic_name__ ( lowerCamelCase__ ):
    """Stable-Diffusion text-to-image pipeline with an optional reference latent.

    When a reference latent tensor is passed to ``__call__``, its centre crop is
    pasted into the freshly sampled noise so that the same seed produces
    similar-looking images at different target resolutions ("seed resize").

    NOTE(review): this copy has been mechanically renamed — every parameter is
    spelled ``snake_case`` and every local ``A_``, so the ``def`` headers below
    contain duplicate argument names and are not valid Python as-is. Comments
    describe the evident intent; the original names must be restored before use.
    """
    # Components registered with the DiffusionPipeline base: VAE, CLIP text
    # encoder + tokenizer, UNet, scheduler, safety checker, feature extractor.
    def __init__( self :Union[str, Any] , snake_case :AutoencoderKL , snake_case :CLIPTextModel , snake_case :CLIPTokenizer , snake_case :UNetaDConditionModel , snake_case :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , snake_case :StableDiffusionSafetyChecker , snake_case :CLIPImageProcessor , ):
        '''Register all sub-models so they are moved/saved together with the pipeline.'''
        super().__init__()
        self.register_modules(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , )
    def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Optional[Union[str, int]] = "auto" ):
        '''Enable sliced attention (lower memory, slightly slower); "auto" halves the head dim.'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            A_ : int = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''Disable attention slicing.

        NOTE(review): `snake_case` is unbound in this method body; the upstream
        implementation passes `None` to `enable_attention_slicing` here.
        '''
        self.enable_attention_slicing(snake_case )
    @torch.no_grad()
    def __call__( self :Any , snake_case :Union[str, List[str]] , snake_case :int = 512 , snake_case :int = 512 , snake_case :int = 50 , snake_case :float = 7.5 , snake_case :Optional[Union[str, List[str]]] = None , snake_case :Optional[int] = 1 , snake_case :float = 0.0 , snake_case :Optional[torch.Generator] = None , snake_case :Optional[torch.FloatTensor] = None , snake_case :Optional[str] = "pil" , snake_case :bool = True , snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case :int = 1 , snake_case :Optional[torch.FloatTensor] = None , **snake_case :Optional[Any] , ):
        '''Generate image(s) from a text prompt via classifier-free-guided denoising.

        Evident parameter roles (by default values): prompt, height=512, width=512,
        num_inference_steps=50, guidance_scale=7.5, negative_prompt,
        num_images_per_prompt=1, eta=0.0, generator, latents, output_type="pil",
        return_dict=True, callback, callback_steps=1, latents_reference.
        '''
        # `prompt` may be a single string or a list; derive the batch size from it.
        if isinstance(snake_case , snake_case ):
            A_ : Dict = 1
        elif isinstance(snake_case , snake_case ):
            A_ : Optional[Any] = len(snake_case )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case )}" )
        # The VAE downsamples by 8, so image dims must be multiples of 8.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(snake_case )}." )
        # get prompt text embeddings
        A_ : int = self.tokenizer(
            snake_case , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        A_ : Dict = text_inputs.input_ids
        # Warn about (and drop) anything past CLIP's maximum sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            A_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            A_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
        # Only encode when the caller did not supply precomputed text embeddings.
        if text_embeddings is None:
            A_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        A_ , A_ , A_ : int = text_embeddings.shape
        A_ : List[str] = text_embeddings.repeat(1 , snake_case , 1 )
        A_ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        A_ : Dict = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            # NOTE(review): bare annotation with no value; in the upstream source this
            # declared the `uncond_tokens` variable that the branches below assign.
            A_ : List[str]
            if negative_prompt is None:
                A_ : List[str] = [""]
            elif type(snake_case ) is not type(snake_case ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !="
                    f" {type(snake_case )}." )
            elif isinstance(snake_case , snake_case ):
                A_ : Optional[Any] = [negative_prompt]
            elif batch_size != len(snake_case ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                A_ : Any = negative_prompt
            # Encode the negative prompt padded to the same length as the prompt.
            A_ : Optional[int] = text_input_ids.shape[-1]
            A_ : Dict = self.tokenizer(
                snake_case , padding="max_length" , max_length=snake_case , truncation=snake_case , return_tensors="pt" , )
            A_ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            A_ : Tuple = uncond_embeddings.shape[1]
            A_ : Dict = uncond_embeddings.repeat(snake_case , snake_case , 1 )
            A_ : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            A_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        # NOTE(review): two shapes are built back-to-back — the target-size latents
        # and (presumably) the 64x64 reference-latents shape (512/8); the obfuscated
        # names collide here, so the second assignment likely had a different name.
        A_ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        A_ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        A_ : List[Any] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                A_ : Tuple = torch.randn(
                    snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(self.device )
                A_ : Optional[Any] = torch.randn(snake_case , generator=snake_case , device="cpu" , dtype=snake_case ).to(
                    self.device )
            else:
                A_ : int = torch.randn(
                    snake_case , generator=snake_case , device=self.device , dtype=snake_case )
                A_ : Optional[int] = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            A_ : Tuple = latents_reference.to(self.device )
            A_ : Any = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        # Centre-crop/paste the reference latents into the target-size noise.
        A_ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
        A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
        A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        A_ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        A_ : Optional[Any] = 0 if dx < 0 else dx
        A_ : Optional[Any] = 0 if dy < 0 else dy
        A_ : List[str] = max(-dx , 0 )
        A_ : List[Any] = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        A_ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(snake_case )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        A_ : str = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        A_ : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A_ : List[str] = {}
        if accepts_eta:
            A_ : Union[str, Any] = eta
        # Denoising loop: predict noise, apply guidance, step the scheduler.
        for i, t in enumerate(self.progress_bar(snake_case ) ):
            # expand the latents if we are doing classifier free guidance
            A_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A_ : Any = self.scheduler.scale_model_input(snake_case , snake_case )
            # predict the noise residual
            A_ : List[str] = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
            # perform guidance
            if do_classifier_free_guidance:
                A_ , A_ : Dict = noise_pred.chunk(2 )
                A_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            A_ : Tuple = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(snake_case , snake_case , snake_case )
        # 0.18215 is the SD VAE scaling factor; invert it before decoding to pixels.
        A_ : List[str] = 1 / 0.18215 * latents
        A_ : Tuple = self.vae.decode(snake_case ).sample
        # Map decoded output from [-1, 1] to [0, 1].
        A_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        # Run the NSFW safety checker when one was registered.
        if self.safety_checker is not None:
            A_ : int = self.feature_extractor(self.numpy_to_pil(snake_case ) , return_tensors="pt" ).to(
                self.device )
            A_ , A_ : List[str] = self.safety_checker(
                images=snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            A_ : List[str] = None
        if output_type == "pil":
            A_ : Optional[int] = self.numpy_to_pil(snake_case )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
| 300 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( __lowercase ):
    """Scheduler-level tests for `KDPMaDiscreteScheduler`.

    Relies on helpers provided by the common scheduler test base class
    (`check_over_configs`, `dummy_model`, `dummy_sample_deter`).

    Fixes over the previous revision: the two class attributes were both bound
    to the same obfuscated name (so `self.scheduler_classes` and
    `self.num_inference_steps` were undefined), every method shared one name
    (later definitions shadowed earlier ones), and method bodies referenced
    undefined locals (`config`, `_a`).
    """

    # Attribute names are the ones the common-test base class reads.
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config; keyword args override defaults."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """Run the sampler end to end with `prediction_type="v_prediction"`."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        """Run the sampler end to end with the default (epsilon) config."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but place timesteps directly via `set_timesteps(device=...)`."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : int = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
_lowerCamelCase : Tuple = {'''allegro/herbert-base-cased''': 5_14}
_lowerCamelCase : Optional[int] = {}
class lowercase ( a ):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Thin wrapper around the fast-tokenizer base class that wires up HerBERT's
    special tokens and the BERT-style single/pair sequence formats.

    Fixes over the previous revision: every ``def`` header contained duplicate
    (obfuscated) parameter names — a SyntaxError — the post-processing methods
    all shared one name so later definitions shadowed earlier ones, and bodies
    referenced undefined locals (`token_ids_a`).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: all 0 for the first sequence, all 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 206 | 0 |
"""simple docstring"""
from math import pow, sqrt
def _A ( *UpperCamelCase_ : float) -> bool:
'''simple docstring'''
__lowercase = len(UpperCamelCase_) > 0 and all(value > 0.0 for value in values)
return result
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_)
else ValueError("Input Error: Molar mass values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a, 2), 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float, UpperCamelCase_ : float) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a, 2) / molar_mass, 6)
if validate(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0.")
)
| 17 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Build atom14 <-> atom37 index maps and existence masks per residue.

    Adds four tensors to *protein* (output keys follow OpenFold's
    ``make_atom14_masks`` transform — TODO confirm against callers) and
    returns the same dict.

    Fixes over the previous revision: both functions in this module shared the
    name `_lowercase` (the second shadowed the first and called the undefined
    `make_atomaa_masks`), the lookup-table lists and dict-key assignments had
    lost their names, and `torch.intaa`/`torch.floataa` do not exist
    (int32/float32 per this file's digit->letter renaming).
    """
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        # 1.0 where the residue actually has an atom in that dense-14 slot.
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atom14_to_atom37 = torch.tensor(restype_atom14_to_atom37_list, dtype=torch.int32, device=device)
    restype_atom37_to_atom14 = torch.tensor(restype_atom37_to_atom14_list, dtype=torch.int32, device=device)
    restype_atom14_mask = torch.tensor(restype_atom14_mask_list, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding (sparse, 37-slot) existence mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atomaa_masks_np(batch):
    """NumPy wrapper: tensorize *batch*, run make_atomaa_masks, convert back."""
    out = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(out))
    return out


# Preserve the module's previous public binding (the second def held this name).
_lowercase = make_atomaa_masks_np
| 275 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : str = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_lowerCamelCase : Any = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
_lowerCamelCase : int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", 
"ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __UpperCAmelCase ( lowerCamelCase__ ):
    """Fast (Rust-backed) tokenizer for NLLB-200.

    Adds source/target language handling on top of the fast-tokenizer base
    class: the active language code is injected as a prefix (or, in legacy
    mode, a suffix) special token around every encoded sequence.

    Fixes over the previous revision: the ``__init__`` header duplicated the
    obfuscated parameter name ``__A`` (a SyntaxError), all class attributes
    were bound to a single name, every method was named ``__magic_name__``
    (so the ``@src_lang.setter`` decorator referenced an undefined name and
    later methods shadowed earlier ones), and several assignment targets
    (``lang_code_to_id``, ``forced_bos_token_id``) had been erased.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Currently configured source-language code (e.g. ``"eng_Latn"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap the token ids with the current language prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token types; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Encode *raw_inputs* for translation and record the forced BOS language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seqaseq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        """Set both language codes, then defer to the base implementation."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Point the post-processor at *src_lang*; legacy mode appends
        [eos, code] as a suffix, the default prepends [code] and appends [eos]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Same as `set_src_lang_special_tokens`, for the target language."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory* when available."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 99 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCamelCase : Any = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 id array of `shape` with values in [0, vocab_size).

    Fixes over the previous revision: both helpers here were named `a__` (the
    second shadowed the first and called the undefined `ids_tensor`), locals
    (`total_dims`, `values`, `attn_mask`) were referenced but never bound, and
    `jnp.intaa` does not exist (int32 per this file's digit->letter renaming).
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return np.array(values, dtype=jnp.int32).reshape(shape)


def random_attention_mask(shape, rng=None):
    """Random 0/1 attention mask over `shape` (batch, seq)."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


# Preserve the module's previous public binding (the second def held this name).
a__ = random_attention_mask
@require_flax
class __UpperCAmelCase :
    # NOTE(review): reads as a Flax generation tester mixin.  Throughout this
    # class the assignment *targets* appear mangled (everything is bound to
    # the literal name `UpperCAmelCase`), while later statements read names
    # such as `config`, `inputs`, `input_ids`, `attention_mask`, `max_length`,
    # `model`, `flax_model`, `pt_model`, `generation_outputs` and
    # `jit_generate` that have no visible binding.  Every method also shares
    # the name `__magic_name__` (later defs shadow earlier ones), yet the
    # tests call `self._get_input_ids_and_config()`.  Documented as-is;
    # confirm against the upstream source before relying on this class.
    UpperCamelCase = None  # presumably the model-tester instance — TODO confirm
    UpperCamelCase = ()  # presumably the tuple of generative model classes — TODO confirm
    def __magic_name__ ( self : str ):
        # Builds the shared fixtures; other methods invoke this behavior via
        # `_get_input_ids_and_config` (see class NOTE about mangled names).
        UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        UpperCAmelCase : Optional[int] = 2
        UpperCAmelCase : Dict = inputs['''input_ids'''].shape[-1] // 2
        UpperCAmelCase : Dict = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        UpperCAmelCase : Optional[int] = jnp.ones_like(__A )
        UpperCAmelCase : Optional[int] = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        UpperCAmelCase : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            UpperCAmelCase : Optional[Any] = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def __magic_name__ ( self : Union[str, Any] ):
        # Flax vs. PyTorch generation parity check (presumably greedy — the
        # `False` below looks like `do_sample`; targets mangled).
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self._get_input_ids_and_config()
        UpperCAmelCase : Optional[Any] = False
        UpperCAmelCase : Any = max_length
        UpperCAmelCase : List[Any] = 0
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Union[str, Any] = model_class(__A )
            UpperCAmelCase : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
            UpperCAmelCase : List[Any] = getattr(__A, __A )
            UpperCAmelCase : Union[str, Any] = pt_model_class(__A ).eval()
            UpperCAmelCase : Tuple = load_flax_weights_in_pytorch_model(__A, flax_model.params )
            UpperCAmelCase : Dict = flax_model.generate(__A ).sequences
            UpperCAmelCase : str = pt_model.generate(torch.tensor(__A, dtype=torch.long ) )
            # PT may stop earlier; truncate the Flax output before comparing.
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                UpperCAmelCase : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() )
    def __magic_name__ ( self : Tuple ):
        # Generation (presumably greedy, `do_sample=False`) must match its
        # jit-compiled counterpart and reach `max_length`.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        UpperCAmelCase : str = False
        UpperCAmelCase : Dict = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Union[str, Any] = model_class(__A )
            UpperCAmelCase : Optional[int] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : List[Any] = jit(model.generate )
            UpperCAmelCase : Optional[Any] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : str ):
        # Same check with the flag set to True (presumably `do_sample=True`).
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self._get_input_ids_and_config()
        UpperCAmelCase : str = True
        UpperCAmelCase : Dict = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Union[str, Any] = model_class(__A )
            UpperCAmelCase : Optional[Any] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : str = jit(model.generate )
            UpperCAmelCase : List[Any] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : Union[str, Any] ):
        # Jit-parity check with an extra knob set to 2 (presumably `num_beams`).
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = self._get_input_ids_and_config()
        UpperCAmelCase : Dict = False
        UpperCAmelCase : Union[str, Any] = max_length
        UpperCAmelCase : List[Any] = 2
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : int = model_class(__A )
            UpperCAmelCase : str = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : int = jit(model.generate )
            UpperCAmelCase : Union[str, Any] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : List[str] ):
        # Output batch must scale by `config.num_return_sequences`.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self._get_input_ids_and_config()
        UpperCAmelCase : Any = False
        UpperCAmelCase : Optional[int] = max_length
        UpperCAmelCase : Optional[int] = 2
        UpperCAmelCase : str = 2
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : int = model_class(__A )
            UpperCAmelCase : Optional[Any] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences )
    def __magic_name__ ( self : Any ):
        # Sampling-style knobs (0.8 / 10 / 0.3 resemble temperature, top_k,
        # top_p; targets mangled — see class NOTE) plus jit parity.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = self._get_input_ids_and_config()
        UpperCAmelCase : str = True
        UpperCAmelCase : Union[str, Any] = max_length
        UpperCAmelCase : Union[str, Any] = 0.8
        UpperCAmelCase : str = 1_0
        UpperCAmelCase : Any = 0.3
        UpperCAmelCase : str = 1
        UpperCAmelCase : Union[str, Any] = 8
        UpperCAmelCase : Optional[Any] = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : int = model_class(__A )
            UpperCAmelCase : List[Any] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : Optional[int] = jit(model.generate )
            UpperCAmelCase : Any = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : List[Any] ):
        # Length-constraint knobs (1 / 8 / 9) with jit parity; targets mangled.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = self._get_input_ids_and_config()
        UpperCAmelCase : Optional[Any] = max_length
        UpperCAmelCase : Tuple = 1
        UpperCAmelCase : Optional[Any] = 8
        UpperCAmelCase : Optional[int] = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : int = model_class(__A )
            UpperCAmelCase : List[str] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : Dict = jit(model.generate )
            UpperCAmelCase : List[str] = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : Optional[Any] ):
        # Same as above with the extra `2` (presumably beam search); mangled.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        UpperCAmelCase : List[str] = max_length
        UpperCAmelCase : Union[str, Any] = 2
        UpperCAmelCase : List[Any] = 1
        UpperCAmelCase : List[str] = 8
        UpperCAmelCase : int = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Optional[Any] = model_class(__A )
            UpperCAmelCase : Union[str, Any] = model.generate(__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : List[str] = jit(model.generate )
            UpperCAmelCase : Tuple = jit_generate(__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : str ):
        # Generation with an explicit, left-padded attention mask + jit parity.
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase : Tuple = False
        UpperCAmelCase : str = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Any = model_class(__A )
            UpperCAmelCase : Union[str, Any] = model.generate(__A, attention_mask=__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : List[Any] = jit(model.generate )
            UpperCAmelCase : Optional[Any] = jit_generate(__A, attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : Optional[int] ):
        # As above, with the flag set to True (presumably sampling).
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase : Union[str, Any] = True
        UpperCAmelCase : Dict = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : Any = model_class(__A )
            UpperCAmelCase : Optional[Any] = model.generate(__A, attention_mask=__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : Optional[Any] = jit(model.generate )
            UpperCAmelCase : Optional[Any] = jit_generate(__A, attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
    def __magic_name__ ( self : Tuple ):
        # As above, with the extra `2` (presumably beam search).
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase : Dict = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase : Union[str, Any] = 2
        UpperCAmelCase : str = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase : str = model_class(__A )
            UpperCAmelCase : int = model.generate(__A, attention_mask=__A ).sequences
            self.assertEqual(generation_outputs.shape[-1], __A )
            UpperCAmelCase : Optional[Any] = jit(model.generate )
            UpperCAmelCase : Dict = jit_generate(__A, attention_mask=__A ).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    def __magic_name__ ( self : str ):
        """Invalid `generate()` kwargs must raise a ValueError naming the bad key.

        Fix for mangled original: `__A` was read throughout the body without
        ever being bound (NameError at runtime); the intended locals and the
        expected exception type are restored from the read sites.
        """
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        input_string = '''Hello world'''
        input_ids = tokenizer(input_string, return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, '''do_samples''' ):
            model.generate(input_ids, do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, '''foo''' ):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids, **fake_model_kwargs )
# ---- file boundary (extraction artifact: stray table-cell separator) ----
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
    """Test-suite for `PNDMScheduler` (PRK + PLMS stepping)."""
    # NOTE(review): as elsewhere in this file, assignment targets appear
    # mangled (bound to `UpperCAmelCase_`) while later lines read `config`,
    # `scheduler`, `sample`, `residual`, `dummy_past_residuals`,
    # `new_scheduler`, `output`, `new_output`, `model`, `num_inference_steps`,
    # `result_sum`, `result_mean` with no visible binding.  The base-class
    # name (`lowercase__`, presumably SchedulerCommonTest) and some parameter
    # lists (duplicate `lowercase_`) are mangled too.  Documented as-is;
    # confirm against the upstream source before use.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (PNDMScheduler,)
    SCREAMING_SNAKE_CASE__ : Tuple = (("""num_inference_steps""", 50),)
    def UpperCamelCase__ ( self , **lowercase_ ):
        """Return the default scheduler config, updated with any overrides."""
        UpperCAmelCase_ : str = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**lowercase_ )
        return config
    def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
        """save_config / from_pretrained round trip: reloaded scheduler must
        step identically (both step_prk and step_plms)."""
        UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
        UpperCAmelCase_ : Optional[Any] = self.dummy_sample
        UpperCAmelCase_ : Dict = 0.1 * sample
        UpperCAmelCase_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
            UpperCAmelCase_ : Union[str, Any] = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals
            UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                UpperCAmelCase_ : List[str] = scheduler_class.from_pretrained(lowercase_ )
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residuals
                UpperCAmelCase_ : Union[str, Any] = dummy_past_residuals[:]
            UpperCAmelCase_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def UpperCamelCase__ ( self ):
        """Intentionally empty override in the original suite."""
        pass
    def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
        """Same save/reload round trip but with the default (un-overridden)
        scheduler config."""
        UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
        UpperCAmelCase_ : Any = self.dummy_sample
        UpperCAmelCase_ : Union[str, Any] = 0.1 * sample
        UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
            UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals (must be after setting timesteps)
            UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(lowercase_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residual (must be after setting timesteps)
                UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
            UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : List[str] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            UpperCAmelCase_ : List[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : str = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def UpperCamelCase__ ( self , **lowercase_ ):
        """Run a full PRK + PLMS denoising loop and return the final sample."""
        UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
        UpperCAmelCase_ : List[str] = self.get_scheduler_config(**lowercase_ )
        UpperCAmelCase_ : int = scheduler_class(**lowercase_ )
        UpperCAmelCase_ : Any = 10
        UpperCAmelCase_ : Tuple = self.dummy_model()
        UpperCAmelCase_ : str = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase_ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            UpperCAmelCase_ : List[str] = model(lowercase_ , lowercase_ )
            UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            UpperCAmelCase_ : str = model(lowercase_ , lowercase_ )
            UpperCAmelCase_ : Dict = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
        return sample
    def UpperCamelCase__ ( self ):
        """step_prk / step_plms must preserve the input sample's shape."""
        UpperCAmelCase_ : Optional[Any] = dict(self.forward_default_kwargs )
        UpperCAmelCase_ : Union[str, Any] = kwargs.pop("num_inference_steps" , lowercase_ )
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : int = self.get_scheduler_config()
            UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
            UpperCAmelCase_ : List[str] = self.dummy_sample
            UpperCAmelCase_ : List[Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
                scheduler.set_timesteps(lowercase_ )
            elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
                UpperCAmelCase_ : Tuple = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
            UpperCAmelCase_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : Dict = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
            UpperCAmelCase_ : Union[str, Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    def UpperCamelCase__ ( self ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Config sweep over steps_offset, plus an exact expected-timesteps check."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=lowercase_ )
        UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase_ : Optional[int] = self.get_scheduler_config(steps_offset=1 )
        UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def UpperCamelCase__ ( self ):
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
            self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Config sweep over beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Forward sweep over individual time steps."""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Forward sweep over (time_step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=lowercase_ )
    def UpperCamelCase__ ( self ):
        """Regression test: 27 (a power-of-3 step count) used to crash
        `set_timesteps` via alpha indexing."""
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        UpperCAmelCase_ : Tuple = 27
        for scheduler_class in self.scheduler_classes:
            UpperCAmelCase_ : Optional[Any] = self.dummy_sample
            UpperCAmelCase_ : Optional[int] = 0.1 * sample
            UpperCAmelCase_ : List[str] = self.get_scheduler_config()
            UpperCAmelCase_ : Union[str, Any] = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
    def UpperCamelCase__ ( self ):
        """Calling step_plms before set_timesteps must raise."""
        with self.assertRaises(lowercase_ ):
            UpperCAmelCase_ : Any = self.scheduler_classes[0]
            UpperCAmelCase_ : Dict = self.get_scheduler_config()
            UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def UpperCamelCase__ ( self ):
        """Full-loop numeric regression (default epsilon prediction)."""
        UpperCAmelCase_ : List[str] = self.full_loop()
        UpperCAmelCase_ : List[str] = torch.sum(torch.abs(lowercase_ ) )
        UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
        assert abs(result_mean.item() - 0.25_80 ) < 1E-3
    def UpperCamelCase__ ( self ):
        """Full-loop numeric regression (v_prediction)."""
        UpperCAmelCase_ : str = self.full_loop(prediction_type="v_prediction" )
        UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
        UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 67.39_86 ) < 1E-2
        assert abs(result_mean.item() - 0.08_78 ) < 1E-3
    def UpperCamelCase__ ( self ):
        """Full-loop numeric regression with custom beta_start."""
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
        UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(lowercase_ ) )
        UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
        assert abs(result_mean.item() - 0.29_95 ) < 1E-3
    def UpperCamelCase__ ( self ):
        """Full-loop numeric regression with custom beta_start (variant
        expected values — presumably a different set_alpha_to_one flag)."""
        # We specify different beta, so that the first alpha is 0.99
        UpperCAmelCase_ : Optional[int] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
        UpperCAmelCase_ : Dict = torch.sum(torch.abs(lowercase_ ) )
        UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
        assert abs(result_mean.item() - 0.24_34 ) < 1E-3
# ---- file boundary (extraction artifact: stray table-cell separator) ----
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass biquad `IIRFilter`.

    Coefficients follow the Audio EQ Cookbook (RBJ) low-pass formulas.
    NOTE(review): the mangled original named every builder in this group
    `__a` (so only the last survived); the upstream names are restored here,
    with the last definition left as `__a` for backward compatibility.

    Args:
        frequency: cut-off frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor (default 1/sqrt(2), i.e. Butterworth).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    # b2 equals b0 in the low-pass cookbook formulas (restored from upstream;
    # the mangled original collapsed the coefficient names).
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-pass biquad `IIRFilter` (RBJ cookbook formulas).

    Args:
        frequency: cut-off frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor (default 1/sqrt(2), i.e. Butterworth).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    # b2 equals b0 in the high-pass cookbook formulas (restored from upstream).
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order band-pass biquad `IIRFilter` (RBJ cookbook formulas).

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor (default 1/sqrt(2)).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order all-pass biquad `IIRFilter` (RBJ cookbook formulas).

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        q_factor: quality factor (default 1/sqrt(2)).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    # For an all-pass filter the a- and b-coefficients mirror each other.
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order peaking-EQ biquad `IIRFilter` (RBJ cookbook formulas).

    Args:
        frequency: centre frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: peak gain in decibels (negative values cut).
        q_factor: quality factor (default 1/sqrt(2)).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-shelf biquad `IIRFilter` (RBJ cookbook formulas).

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels (negative values cut).
        q_factor: quality factor (default 1/sqrt(2)).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Cookbook sub-terms: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def __a(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-shelf biquad `IIRFilter` (RBJ cookbook formulas).

    NOTE(review): the def name looks mangled — upstream calls this
    `make_highshelf`.  The name `__a` is kept because it is the binding that
    survives the (also-mangled) earlier definitions in this module.

    Args:
        frequency: shelf corner frequency in Hz.
        samplerate: sampling rate in Hz.
        gain_db: shelf gain in decibels (negative values cut).
        q_factor: quality factor (default 1/sqrt(2)).
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    # Cookbook sub-terms: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
# ---- file boundary (extraction artifact: stray table-cell separator) ----
import datasets
from .evaluate import evaluate
__A : Union[str, Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
__A : List[str] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
__A : Optional[int] = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    # NOTE(review): both method names below look mangled — as a
    # `datasets.Metric` subclass they are presumably meant to be `_info` and
    # `_compute`; confirm against the upstream CUAD metric before relying on
    # this class.  Fixed here: duplicate `_UpperCamelCase` parameters
    # (a SyntaxError) and locals read under names (`pred_dict`, `dataset`)
    # that were never bound in the mangled original.
    def a__ ( self :Tuple ):
        """Declare the metric's input features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    """predictions""": {
                        """id""": datasets.Value("""string""" ),
                        """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
                    },
                    """references""": {
                        """id""": datasets.Value("""string""" ),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string""" ),
                                """answer_start""": datasets.Value("""int32""" ),
                            } ),
                    },
                } ) ,codebase_urls=["""https://www.atticusprojectai.org/cuad"""] ,reference_urls=["""https://www.atticusprojectai.org/cuad"""] ,)
    def a__ ( self :List[str] ,predictions :list ,references :list ):
        """Score `predictions` against `references` with the official CUAD
        `evaluate` script and return its metrics dict."""
        # Map each question id to its list of predicted answer texts.
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        # Re-shape the references into the SQuAD-style layout `evaluate` expects.
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset ,predictions=pred_dict )
        return score
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    '''Copy `weight` (and optionally `bias`) tensors into `torch_layer` after shape checks.

    Fix for mangled original: parameters shared one name (a SyntaxError), the
    `nn.Parameter` results were never assigned to the layer, and the def name
    is restored to `set_param` as called throughout this script.
    '''
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    '''Load one LSH self-attention layer's (query_key, value, out-dense) weights.

    Fix for mangled original: duplicate parameter names (SyntaxError) and
    numpy arrays bound to a placeholder name; the def name is restored to
    `set_layer_weights_in_torch_lsh` as called at the block-loader below.
    '''
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    '''Load one local self-attention layer's (query, key, value, out-dense) weights.

    Fix for mangled original: duplicate parameter names (SyntaxError) and
    numpy arrays bound to a placeholder name; the def name is restored to
    `set_layer_weights_in_torch_local` as called at the block-loader below.
    '''
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy the weights of one trax Reformer block into a torch block.

    Dispatches to the LSH or local attention loader depending on how many
    attention weight arrays are present, then copies both layer norms and the
    two feed-forward dense layers.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        # 3 arrays -> shared query_key projection -> LSH attention
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer weight tree into a torch ReformerModelWithLMHead.

    Handles word embeddings, (axial) position embeddings, all encoder blocks
    (4 trax weight groups per torch layer), the final layer norm and the LM
    head decoder.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # axial position embeddings come as a tuple of per-axis weight arrays
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load trax weights
    from the pickle at `trax_model_pkl_path`, and save the torch state dict
    to `pytorch_dump_path`.

    The original block read `model` and `config` without ever binding them
    (every assignment went to a throwaway name); names are restored.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        # help text corrected: the input is a trax pickle, not a TensorFlow checkpoint
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect a ray at (point_x, point_y) on the ellipse 4x^2 + y^2 = 100 and
    return the next hit point plus the outgoing gradient.

    The original def gave all three parameters the same name (a SyntaxError)
    and never bound the locals it read; restored to match the call site
    `next_point(...)`.
    """
    # tangent slope at (x, y) is -4x/y, so the normal slope is y / (4x)
    normal_gradient = point_y / 4 / point_x
    # double-angle style rotation coefficients built from the normal gradient
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count internal reflections of a laser entering the ellipse
    4x^2 + y^2 = 100 at (first_x_coord, first_y_coord) until it exits through
    the top gap -0.01 <= x <= 0.01, y > 0 (Project Euler style problem).

    The original def used the same name for both defaulted parameters (a
    SyntaxError) and read names it never bound; restored.
    """
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    # incoming beam comes from (0.0, 10.1)
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
    # Script entry point: report the reflection count with debug formatting.
    print(f"{solution() = }")
| 42 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase(SequenceFeatureExtractor):
    """Whisper-style feature extractor: pads/truncates raw mono audio to
    `chunk_length` seconds and converts it to log-Mel spectrogram features.

    Repairs to the obfuscated original: the base class `a_` was undefined
    (SequenceFeatureExtractor is imported above and matches the
    `super().__init__` call); `__init__` assigned locals instead of `self.*`;
    the three methods were all named `__A` (each shadowing the previous) while
    `__call__` invokes them as `self.zero_mean_unit_var_norm` and
    `self._np_extract_fbank_features`, so those names are restored.
    """

    # NOTE(review): restored from the `["input_features"]` class attribute —
    # presumably SequenceFeatureExtractor's `model_input_names`; confirm.
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # number of raw samples per fixed-length chunk
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute the clamped, scaled log10 Mel spectrogram of one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        # drop the last (incomplete) frame
        log_spec = log_spec[:, :-1]
        # clamp to 8 dB below the peak, then rescale into roughly [-1, 1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each sequence to zero mean / unit variance over its
        unpadded length; padded positions are reset to `padding_value`."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature
        with `input_features` (and optionally a downsampled `attention_mask`)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> dict:
        """Serialize the extractor config, dropping the (large, derivable)
        mel filter bank and recording the concrete class name."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 252 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# In the original both statements assigned to the same throwaway name, so the
# archive map silently overwrote the logger. Give each its own name.
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration for a ViT model (`model_type = "vit"`).

    Repairs: the base class `lowercase__` was undefined (PretrainedConfig is
    imported above), `__init__` assigned locals instead of `self.*`, and the
    class shared its obfuscated name with the ONNX config below, so the second
    definition shadowed this one — hence the rename.
    """

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT.

    Property names restored from their return annotations
    (`inputs` -> Mapping[str, Mapping[int, str]], `atol_for_validation` ->
    float); the obfuscated original gave both the same name, shadowing one.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # single pixel_values input with dynamic batch/channel/spatial axes
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance when validating the exported model
        return 1e-4
| 120 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the whisper subpackage. The obfuscated original bound
# every structure update to a throwaway name, leaving `_import_structure`
# (referenced by the `_LazyModule` call at the bottom) undefined, and dropped
# the `sys.modules[__name__]` assignment that makes the lazy module take effect.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; at runtime the lazy module handles them.
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 | 1 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# `logger.info` is used inside the conversion function below, so the logger
# must be bound to the name `logger` (the original bound a throwaway name).
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy weights from an old-structure (XLM)ProphetNet checkpoint into the
    current model classes and save the result.

    Walks every key the new model reported as missing, maps its attribute path
    onto the old model's naming scheme, and copies weight/bias tensors over.
    Locals were restored from the names the obfuscated body read but never
    bound (`prophet`, `loading_info`, `mapping`, `model`, `old_model`, ...).
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # attention projections stored as one fused in_proj tensor in the old model
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new-attribute-name -> old-attribute-name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # fall back to the unmapped name when the old model lacks it
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # fused QKV: split the old in_proj tensor into three equal parts
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # the original left these as bare no-op comparisons; make them real checks
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # descend one level on both models for the next path component
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point; the original assigned the parser to a throwaway name
    # and then called `parser.add_argument` on an undefined `parser`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 298 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
# Root logger used by the test class below; `logger.addHandler` is called
# later, so the getLogger() result must actually be bound to `logger`.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """Return the value passed on the command line via `-f`.

    The obfuscated original assigned parser/args to throwaway names while
    reading `args.f`; names restored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """Load `all_results.json` from `output_dir` and return it as a dict.

    Raises ValueError if the file does not exist. Renamed from the obfuscated
    `__lowerCAmelCase` to match the `get_results(...)` call sites below.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    """True when the test device is a CUDA GPU and NVIDIA apex is installed.

    Renamed from the obfuscated def to match the `is_cuda_and_apex_available()`
    call site inside the test class.
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Mirror test logging to stdout; the handler must be bound to `stream_handler`
# (the original assigned a throwaway name, leaving it undefined for addHandler).
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@classmethod
def a_ (cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def a_ (cls ) -> Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
# End-to-end run of the no_trainer semantic-segmentation example on a tiny
# test dataset; only overall accuracy is checked.
# NOTE(review): mangled names -- later lines read testargs / result /
# _UpperCAmelCase which are never bound here.
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Ten training steps is enough to beat 10% overall accuracy on this sample.
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
# End-to-end run of the no_trainer image-classification example (ViT on the
# cats-vs-dogs sample), optionally with fp16 when CUDA + apex are available.
# NOTE(review): mangled names -- later lines read testargs / result /
# _UpperCAmelCase which are never bound here.
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
# --checkpointing_steps 1 saves step_1; --with_tracking writes the run dir.
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
| 298 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
# (parlai_substring, hf_substring) pairs applied in order to translate ParlAI
# Blenderbot parameter names into HF Blenderbot names.
# NOTE(review): both constants bind the mangled name lowerCamelCase; the
# functions below read them as `logger` / `PATTERNS`, which are undefined here.
lowerCamelCase : int = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] ):
    """Translate a single ParlAI state-dict key into its HF Blenderbot name.

    Applies the substring substitutions in ``PATTERNS`` in order, then fixes
    up the encoder/decoder attention and layer-norm names.

    :param lowercase: a ParlAI state-dict key.
    :return: the corresponding HF key.
    """
    # Work on a local copy of the incoming key.
    k = lowercase
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        # BUG FIX: the original discarded the result of replace() (strings are
        # immutable), so no substitution ever took effect.
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
lowerCamelCase_ = sd.pop(_lowerCAmelCase )
lowerCamelCase_ = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
lowerCamelCase_ = v
lowerCamelCase : List[Any] = ['''START''']
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Any , lowercase : Optional[int] ):
# Convert a ParlAI Blenderbot checkpoint to a HF
# BlenderbotForConditionalGeneration, rename every key, and save in fp16.
# NOTE(review): this definition is broken by name mangling -- all three
# parameters share the name `lowercase` (a SyntaxError), the body reads
# undefined names (`model`, `cfg`, `m`, `sd`, `_lowerCAmelCase`, ...), and
# the helpers it should call (`rename_state_dict_key`,
# `rename_layernorm_keys`) are all mangled to the same shadowed name
# `_SCREAMING_SNAKE_CASE`. Restore from upstream before use.
'''simple docstring'''
lowerCamelCase_ = torch.load(_lowerCAmelCase , map_location='cpu' )
lowerCamelCase_ = model["model"]
lowerCamelCase_ = BlenderbotConfig.from_json_file(_lowerCAmelCase )
lowerCamelCase_ = BlenderbotForConditionalGeneration(_lowerCAmelCase )
lowerCamelCase_ = m.model.state_dict().keys()
lowerCamelCase_ = []
lowerCamelCase_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowerCamelCase_ = rename_state_dict_key(_lowerCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowerCamelCase_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCAmelCase )
m.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
m.half()
m.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
# CLI entry point: --src_path (ParlAI checkpoint), --save_dir, --hf_config_json.
# NOTE(review): `convert_parlai_checkpoint` is not defined under that name in
# this mangled file (the function above is `_SCREAMING_SNAKE_CASE`).
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 354 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
# Path to the in-repo transformers package; imported directly so the script
# always inspects the checked-out sources rather than an installed copy.
# NOTE(review): every constant here binds the same mangled name
# `lowerCamelCase`; later functions read `_re_tf_models` etc., undefined here.
lowerCamelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
lowerCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
# Each entry links a hub pipeline tag to the auto-mapping constant and the
# Auto* class that serves it; read by the update/check functions below.
lowerCamelCase : Tuple = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
# NOTE(review): "MODEL_FOR_FOR_VISION_2_SEQ..." has a doubled "FOR_" -- verify
# against the actual constant name in models/auto before relying on it.
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowercase )
return [m.group(0 ) for m in matches]
def _SCREAMING_SNAKE_CASE ( ):
# Build a DataFrame listing, per model type, which frameworks (PT/TF/Flax)
# implement it and which preprocessor class serves it.
# NOTE(review): name mangling broke the locals -- assignments bind
# `lowerCamelCase_` while later lines read `config_maping_names`, `tf_models`,
# `flax_models`, `pt_models`, `lookup_dict`, `attr_name`, `model_prefix_to_model_type`,
# `all_models`, `data`, `processors`, none of which are defined.
'''simple docstring'''
lowerCamelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCamelCase_ = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase ):
lowerCamelCase_ = None
if _re_tf_models.match(lowercase ) is not None:
lowerCamelCase_ = tf_models
lowerCamelCase_ = _re_tf_models.match(lowercase ).groups()[0]
elif _re_flax_models.match(lowercase ) is not None:
lowerCamelCase_ = flax_models
lowerCamelCase_ = _re_flax_models.match(lowercase ).groups()[0]
elif _re_pt_models.match(lowercase ) is not None:
lowerCamelCase_ = pt_models
lowerCamelCase_ = _re_pt_models.match(lowercase ).groups()[0]
if lookup_dict is not None:
while len(lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCamelCase_ = True
break
# Try again after removing the last word in the name
lowerCamelCase_ = ''.join(camel_case_split(lowercase )[:-1] )
lowerCamelCase_ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCamelCase_ = list(lowercase )
all_models.sort()
lowerCamelCase_ = {'model_type': all_models}
lowerCamelCase_ = [pt_models[t] for t in all_models]
lowerCamelCase_ = [tf_models[t] for t in all_models]
lowerCamelCase_ = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
lowerCamelCase_ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCamelCase_ = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCamelCase_ = 'AutoTokenizer'
lowerCamelCase_ = [processors[t] for t in all_models]
return pd.DataFrame(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
# For every (pipeline_tag, mapping, auto_class) triple, collect the model
# classes listed in the PT/TF/Flax auto mappings and record their pipeline
# tag and auto class in the table.
# NOTE(review): mangled locals -- `table`, `modules`, `mappings`, `auto_classes`,
# `model_names` are read but the assignments bind `lowerCamelCase_`.
'''simple docstring'''
lowerCamelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCamelCase_ = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
lowerCamelCase_ = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase , lowercase , lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase , lowercase ):
continue
# First extract all model_names
lowerCamelCase_ = []
for name in getattr(lowercase , lowercase ).values():
if isinstance(lowercase , lowercase ):
model_names.append(lowercase )
else:
model_names.extend(list(lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] ):
# Rebuild the frameworks/pipeline-tags metadata and push it to the
# huggingface/transformers-metadata dataset repo.
# NOTE(review): broken by mangling -- both parameters share the name
# `lowercase` (SyntaxError) and the body reads undefined names
# (`tags_dataset`, `table`, `model_classes`, `commit_sha`, `msg`, ...).
'''simple docstring'''
lowerCamelCase_ = get_frameworks_table()
lowerCamelCase_ = Dataset.from_pandas(lowercase )
lowerCamelCase_ = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowercase )
lowerCamelCase_ = Dataset.from_json(lowercase )
lowerCamelCase_ = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowercase ) )
}
lowerCamelCase_ = update_pipeline_and_auto_class_table(lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
lowerCamelCase_ = sorted(table.keys() )
lowerCamelCase_ = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
lowerCamelCase_ = Dataset.from_pandas(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowercase , 'pipeline_tags.json' ) )
if commit_sha is not None:
lowerCamelCase_ = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
lowerCamelCase_ = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowercase , repo_type='dataset' , token=lowercase , commit_message=lowercase , )
def _SCREAMING_SNAKE_CASE ( ):
# Verify that every pipeline task in transformers.pipelines.SUPPORTED_TASKS
# appears in PIPELINE_TAGS_AND_AUTO_MODELS; raise with the missing tags.
# NOTE(review): mangled locals -- `in_table`, `pipeline_tasks`, `missing`,
# `model`, `msg` are read but never bound under those names.
'''simple docstring'''
lowerCamelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCamelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCamelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCamelCase_ = pipeline_tasks[key]['pt']
if isinstance(lowercase , (list, tuple) ):
lowerCamelCase_ = model[0]
lowerCamelCase_ = model.__name__
if model not in in_table.values():
missing.append(lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = ', '.join(lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
# CLI: --check-only validates pipeline coverage; otherwise push fresh
# metadata using --token and --commit_sha.
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowerCamelCase : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 208 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __UpperCAmelCase ):
# Unconditional image-generation pipeline for score-based SDE-VE models:
# iterates corrector (Langevin) and predictor steps of the VE scheduler.
# NOTE(review): mangled locals -- `img_size`, `model`, `sample`, `sample_mean`
# are read below but the assignments bind `lowerCAmelCase_`; the two `42`
# class attributes were type annotations (UNet2DModel / ScoreSdeVeScheduler)
# before obfuscation.
__snake_case = 42
__snake_case = 42
def __init__( self, UpperCamelCase__, UpperCamelCase__ ):
"""Store the UNet and the SDE-VE scheduler as pipeline modules."""
super().__init__()
self.register_modules(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self, UpperCamelCase__ = 1, UpperCamelCase__ = 2000, UpperCamelCase__ = None, UpperCamelCase__ = "pil", UpperCamelCase__ = True, **UpperCamelCase__, ):
"""Generate images: args are batch_size, num_inference_steps, generator, output_type, return_dict."""
lowerCAmelCase_ = self.unet.config.sample_size
lowerCAmelCase_ = (batch_size, 3, img_size, img_size)
lowerCAmelCase_ = self.unet
# Start from noise scaled to the scheduler's initial sigma.
lowerCAmelCase_ = randn_tensor(UpperCamelCase__, generator=UpperCamelCase__ ) * self.scheduler.init_noise_sigma
lowerCAmelCase_ = sample.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase__ )
self.scheduler.set_sigmas(UpperCamelCase__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCAmelCase_ = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowerCAmelCase_ = self.unet(UpperCamelCase__, UpperCamelCase__ ).sample
lowerCAmelCase_ = self.scheduler.step_correct(UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample
# prediction step
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ).sample
lowerCAmelCase_ = self.scheduler.step_pred(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ = output.prev_sample, output.prev_sample_mean
# Use the denoised mean for the final image; clamp to valid pixel range.
lowerCAmelCase_ = sample_mean.clamp(0, 1 )
lowerCAmelCase_ = sample.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 278 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# Filename under which scheduler configurations are (de)serialized.
_A = '''scheduler_config.json'''
# Enumeration of the 14 Karras-style diffusion schedulers; the original
# member names were mangled to `__snake_case`, leaving only the values.
class A ( __UpperCAmelCase ):
__snake_case = 1
__snake_case = 2
__snake_case = 3
__snake_case = 4
__snake_case = 5
__snake_case = 6
__snake_case = 7
__snake_case = 8
__snake_case = 9
__snake_case = 10
__snake_case = 11
__snake_case = 12
__snake_case = 13
__snake_case = 14
@dataclass
# Base output container for scheduler steps; the `42` was a type annotation
# (torch.FloatTensor field, presumably `prev_sample`) before obfuscation.
class A ( __UpperCAmelCase ):
__snake_case = 42
class A :
# Scheduler base mixin: config-driven save/load plus discovery of
# compatible scheduler classes.
__snake_case = SCHEDULER_CONFIG_NAME
__snake_case = []
__snake_case = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
"""Load a scheduler from a pretrained config (from_pretrained equivalent)."""
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase__, subfolder=UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, return_commit_hash=UpperCamelCase__, **UpperCamelCase__, )
return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
"""Write this scheduler's config to *save_directory* (save_pretrained equivalent)."""
self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""List of scheduler classes compatible with this one."""
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
"""Resolve the class names in `_compatibles` (plus this class) to classes."""
# NOTE(review): mangled locals -- `compatible_classes_str` and the module
# bound for getattr are assigned to `lowerCAmelCase_` and never readable.
lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
lowerCAmelCase_ = [
getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
]
return compatible_classes
| 278 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase: str = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[Any] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
# Benchmark both 3-sum implementations with timeit over the shared dataset
# and return the pair of best-of-5 timings (naive, two-pointer).
# NOTE(review): the setup string imports `dataset`, `triplet_sum1`,
# `triplet_sum2` from __main__ -- none of those names exist in this
# mangled module (everything is `__SCREAMING_SNAKE_CASE`/`UpperCAmelCase`).
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
# Run doctests (none are defined) and print both benchmark timings.
# NOTE(review): `solution_times` is undefined in this mangled file.
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 1 |
def snake_case ( x , y ):
    """Length and one witness of the longest common subsequence of *x* and *y*.

    Classic O(m*n) dynamic programme followed by a backtrack that rebuilds
    one optimal subsequence. Returns ``(length, subsequence)``.
    BUG FIX: the original declared both parameters as ``snake_case__``
    (a SyntaxError) while the body read ``x`` and ``y``.
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # dp[i][j] = LCS length of x[:i] and y[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)
    # Backtrack from the bottom-right corner to recover one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return dp[m][n], seq
if __name__ == "__main__":
# Demo run on the classic AGGTAB/GXTXAYB example (expected: 4, "GTAB").
# NOTE(review): `longest_common_subsequence` is undefined in this mangled
# file (the function above is `snake_case`).
_SCREAMING_SNAKE_CASE = 'AGGTAB'
_SCREAMING_SNAKE_CASE = 'GXTXAYB'
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 'GTAB'
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 180 | from math import isqrt
def snake_case ( snake_case__ :int) -> bool:
    """Deterministic primality test by trial division up to sqrt(n).

    BUG FIX: the original returned True for numbers below 2 (``all`` over an
    empty divisor range); 0, 1 and negatives are not prime.
    """
    if snake_case__ < 2:
        return False
    return all(snake_case__ % divisor != 0 for divisor in range(2, isqrt(snake_case__) + 1))
def snake_case ( snake_case__ :int = 10**6) -> int:
    """Count primes below *snake_case__* that are differences of consecutive cubes.

    The differences n**3 - (n-1)**3 are 7, 19, 37, 61, ... with successive
    gaps of 6*n, which is how the candidate sequence is generated.
    BUG FIX: the original called an undefined name ``is_prime`` (and the
    same-named module helper is shadowed by this very function), so the
    primality test is inlined as a local helper.
    """
    def _is_prime(number: int) -> bool:
        # Trial division up to sqrt(number); numbers below 2 are not prime.
        if number < 2:
            return False
        return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))

    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < snake_case__:
        primes_count += _is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
# Print the count for the default limit of one million.
# NOTE(review): `solution` is undefined in this mangled file (both functions
# above are named `snake_case`).
print(F'''{solution() = }''')
| 180 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring replacements mapping CLAP (laion) parameter names onto HF names.
# NOTE(review): both constants bind the mangled name SCREAMING_SNAKE_CASE__;
# later code reads KEYS_TO_MODIFY_MAPPING / processor, undefined here. Also
# note the second assignment downloads from the Hub at import time.
SCREAMING_SNAKE_CASE__ = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any]=False ):
# Instantiate a laion-CLAP HTSAT-tiny/roberta model (optionally with fusion).
# NOTE(review): mangled -- the tuple is assigned to `lowercase_` twice but
# the return reads `model, model_cfg`, which are never bound.
'''simple docstring'''
lowercase_ , lowercase_ = create_model(
"HTSAT-tiny" , "roberta" , __lowerCamelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=__lowerCamelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] ):
# Rewrite a CLAP state dict into HF naming: substring substitutions,
# nn.Sequential index translation, projection-layer renumbering, and
# splitting fused qkv weights into separate query/key/value tensors.
# NOTE(review): mangled locals throughout -- the loop reads `state_dict`,
# `sequential_layer`, `projecton_layer`, `mixed_qkv`, `qkv_dim`, etc., while
# assignments bind `lowercase_`; the result dict is never populated under a
# readable name. Also `if "audio" and "qkv" in key` only tests the second
# operand ("audio" is always truthy) -- verify intent against upstream.
'''simple docstring'''
lowercase_ = {}
lowercase_ = r".*sequential.(\d+).*"
lowercase_ = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase_ = key.replace(__lowerCamelCase , __lowerCamelCase )
if re.match(__lowerCamelCase , __lowerCamelCase ):
# replace sequential layers with list
lowercase_ = re.match(__lowerCamelCase , __lowerCamelCase ).group(1 )
lowercase_ = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(__lowerCamelCase )//3}.linear.' )
elif re.match(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowercase_ = 1 if projecton_layer == 0 else 2
lowercase_ = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowercase_ = value
lowercase_ = mixed_qkv.size(0 ) // 3
lowercase_ = mixed_qkv[:qkv_dim]
lowercase_ = mixed_qkv[qkv_dim : qkv_dim * 2]
lowercase_ = mixed_qkv[qkv_dim * 2 :]
lowercase_ = query_layer
lowercase_ = key_layer
lowercase_ = value_layer
else:
lowercase_ = value
return model_state_dict
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int]=False ):
# Convert a CLAP checkpoint into a HF ClapModel and save model + config.
# NOTE(review): broken by mangling -- all four parameters share the name
# `__lowerCamelCase` (a SyntaxError) and the body reads undefined names
# (`clap_model`, `state_dict`, `transformers_config`, `enable_fusion`,
# `model`); the helpers it should call are also shadowed. Restore upstream.
'''simple docstring'''
lowercase_ , lowercase_ = init_clap(__lowerCamelCase , enable_fusion=__lowerCamelCase )
clap_model.eval()
lowercase_ = clap_model.state_dict()
lowercase_ = rename_state_dict(__lowerCamelCase )
lowercase_ = ClapConfig()
lowercase_ = enable_fusion
lowercase_ = ClapModel(__lowerCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
transformers_config.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
# CLI: checkpoint/config paths, output folder, and the fusion toggle.
# NOTE(review): `convert_clap_checkpoint` is undefined in this mangled file.
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 297 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# Vocab file names, download URLs per GPT-2 checkpoint, and max positional
# embedding sizes, consumed by the fast-tokenizer class below.
# NOTE(review): all five constants bind the mangled name SCREAMING_SNAKE_CASE__;
# the class reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, undefined here.
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""Fast (tokenizers-backed) GPT-2 BPE tokenizer.

Mirrors GPTaTokenizer but on the Rust backend; supports toggling
``add_prefix_space`` and building conversation inputs truncated to the
model's maximum length.
"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
"""Initialize the backend tokenizer and sync its pre-tokenizer's add_prefix_space setting."""
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
# If the serialized pre-tokenizer disagrees on add_prefix_space, rebuild it.
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
"""Batched encoding; pre-tokenized input requires add_prefix_space=True."""
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
"""Single-example encoding; pre-tokenized input requires add_prefix_space=True."""
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
"""Save the vocabulary/merges files to a directory; returns the written paths."""
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
"""Flatten a Conversation into ids, appending EOS after each turn and
keeping only the trailing model_max_length tokens."""
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
| 297 | 1 |
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Bug fix: argparse stores the option as `txt2img_unclip`; the obfuscated
    # version read a non-existent `txtaimg_unclip` attribute.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Build the image-variation pipeline from the txt2img components plus a
    # CLIP image encoder/feature extractor.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 11 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class lowerCAmelCase__(unittest.TestCase):
    """
    Multi-node SageMaker distributed-training smoke test, parameterized per
    framework/script. Restored from an obfuscated form in which all three
    methods shared the name `_lowerCamelCase` (so only one survived) and
    several call-site values were replaced by the undefined name
    `__lowerCamelCase`; local names are recovered from their later uses.
    """

    def setUp(self):
        # Stage the example training script into the SageMaker test directory.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,  # fail fast if the copy fails (was an undefined name)
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: SMDataParallel unless the script manages DDP itself
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the CloudWatch training metrics of `job_name` to CSV."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator and run training
        estimator = self.create_estimator(instance_count)
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 11 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy import structure: maps submodule name -> public names it provides.
# Bug fix: the obfuscated version assigned this dict (and the optional lists)
# to throwaway names, then referenced the never-defined `_import_structure`
# at module bottom — a guaranteed NameError on import.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Consistency fix: the TYPE_CHECKING imports used digit-mangled `Va` module
    # and class names; they must match the string keys/names above.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (transformers' `...utils.logging` wrapper).
logger = logging.get_logger(__name__)

# PIL is only needed for the `PIL.Image.Image` return annotation below.
if is_vision_available():
    import PIL
class lowerCAmelCase(BaseImageProcessor):
    """
    CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and normalize. Restored from an obfuscated form in
    which every parameter was named `__a` (duplicate parameter names are a
    SyntaxError) and all five methods shared one name; the real method names
    are grounded by the `self.resize` / `self.center_crop` / `self.rescale` /
    `self.normalize` calls in `preprocess`, and the base class by the
    `BaseImageProcessor` import at the top of the file.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # shortest-edge sizing, hence default_to_square=False (CLIP convention)
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # module-level `resize` from ...image_transforms, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Per-channel normalization: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline over one image or a batch; per-call overrides fall back to the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
from __future__ import annotations
__magic_name__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__magic_name__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = -1
for j in range(i + 1 , UpperCamelCase_ ):
if arr[i] < arr[j]:
__SCREAMING_SNAKE_CASE = arr[j]
break
result.append(UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
for i, outer in enumerate(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = -1
for inner in arr[i + 1 :]:
if outer < inner:
__SCREAMING_SNAKE_CASE = inner
break
result.append(UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [-1] * arr_size
for index in reversed(range(UpperCamelCase_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__SCREAMING_SNAKE_CASE = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__magic_name__ = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 100 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format `t` (seconds) as h:mm:ss, or mm:ss when under an hour.

    Name restored from the mangled `UpperCAmelCase__`: the progress-bar
    classes below call `format_time(...)`.
    """
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    """Return the HTML snippet for a notebook progress bar.

    Name restored from the mangled `UpperCAmelCase__`: `NotebookProgressBar.display`
    below calls `html_progress_bar(...)`.
    """
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    """Render `items` (first row = headers) as an HTML table string.

    Name restored from the mangled `UpperCAmelCase__`: the training tracker
    below calls `text_to_html_table(...)`. Floats are shown with 6 decimals.
    """
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f""" <th>{i}</th>\n"""
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f""" <td>{elt}</td>\n"""
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """
    HTML progress bar for Jupyter notebooks. Class name restored from the
    mangled `A__`: the tracker/callback classes below instantiate
    `NotebookProgressBar`, and the `snake_case = ...` lines are restored to
    the `self.*` assignments their later reads require.
    """

    # Number of initial `update` calls that always redraw.
    warmup = 5
    # Minimum seconds between redraws once timing data exists.
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        """Record progress `value`; redraw when forced, warming up, or enough steps elapsed."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialise timing state and draw immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            # Throttle: redraw roughly every `update_every` seconds of work.
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the textual label (elapsed / remaining / it/s) and redraw."""
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            # NOTE(review): display_id value was mangled to an undefined name;
            # True (auto display id) matches upstream usage — confirm.
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Blank out a top-level bar that has already been displayed."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """
    Progress bar plus an inner metrics table and an optional child bar.
    Class name and base restored from the mangled `A__(snake_case__)`: the
    callback below instantiates `NotebookTrainingTracker`, and the body uses
    the `NotebookProgressBar` attributes (value/total/prefix/label/width).
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # inner_table holds [headers, row, row, ...]; None until first write.
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            # NOTE(review): display_id value was mangled; True matches upstream — confirm.
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append a row (dict column -> value) to the inner table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Attach (and return) a child bar rendered below this tracker."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    Trainer callback that drives a NotebookTrainingTracker during training and
    evaluation. Class/base/method names restored from the mangled
    `A__(snake_case__)` / `a_`: the hook signatures (args, state, control,
    **kwargs) and bodies match the TrainerCallback protocol imported above.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping metrics that don't belong in the table.
            metrics.pop("total_flos", None)
            metrics.pop("epoch", None)
            metrics.pop(f"{metric_key_prefix}_runtime", None)
            metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 127 | 0 |
def longest_distance(graph):
    """Kahn topological sort over a DAG given as {node: [successors]}, tracking
    the longest path length (in node count); prints and returns the maximum.

    Name restored from the mangled `lowerCAmelCase__`: the call site below
    already invokes `longest_distance(graph)`. Returning the value (the
    original only printed it) is backward-compatible and makes it testable.
    """
    from collections import deque  # O(1) popleft vs list.pop(0)'s O(n)

    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)
    for successors in graph.values():
        for node in successors:
            indegree[node] += 1
    queue = deque(node for node in range(len(graph)) if indegree[node] == 0)
    while queue:
        vertex = queue.popleft()
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if long_dist[vertex] + 1 > long_dist[successor]:
                long_dist[successor] = long_dist[vertex] + 1
            if indegree[successor] == 0:
                queue.append(successor)
    result = max(long_dist)
    print(result)
    return result


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 185 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :List[str] = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Any = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase__ :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)  # module-level logger (transformers ...utils.logging)
__snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
__snake_case = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__snake_case = {
"allenai/led-base-16384": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable bytes map to themselves; the remaining bytes are assigned
    characters starting at chr(256) so the BPE vocab avoids whitespace/control
    characters. Name restored from the mangled `_lowercase` — the tokenizer
    below calls `bytes_to_unicode()`. Bug fix: the loop appended the undefined
    name `__lowerCamelCase` instead of the byte `b`.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a non-empty sequence
    of symbols, e.g. a tuple of BPE sub-tokens).

    Name restored from the mangled `_lowercase` — the BPE loop in the
    tokenizer below calls `get_pairs(...)`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowercase__ ( _UpperCAmelCase ):
A__ : Dict =VOCAB_FILES_NAMES
A__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
A__ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] =["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int="replace" , UpperCAmelCase_ : Optional[Any]="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : Dict="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Union[str, Any]="<pad>" , UpperCAmelCase_ : Dict="<mask>" , UpperCAmelCase_ : Dict=False , **UpperCAmelCase_ : int , ):
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else bos_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else sep_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else cls_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A_ ( self : int ):
return len(self.encoder )
def A_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def A_(self, token):
    """Apply byte-pair encoding to a single pre-token.

    Greedily merges the lowest-ranked adjacent symbol pair (per
    ``self.bpe_ranks``) until no mergeable pair remains.  Results are
    memoized in ``self.cache``.  Returns the merged symbols joined by
    single spaces.

    Fixes over the garbled original: the parameter is named ``token`` (the
    body already read that name), and the working variables (``word``,
    ``pairs``, ``bigram``, …) are actually bound instead of being assigned
    to a throwaway placeholder.
    """
    if token in self.cache:
        return self.cache[token]

    def _get_pairs(word):
        # Set of adjacent symbol pairs in `word` (mirrors the module-level helper).
        pairs = set()
        prev_char = word[0]
        for char in word[1:]:
            pairs.add((prev_char, char))
            prev_char = char
        return pairs

    word = tuple(token)
    pairs = _get_pairs(word)
    if not pairs:
        return token
    while True:
        # Lowest-ranked (i.e. earliest-learned) pair; unknown pairs rank as +inf.
        bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            try:
                j = word.index(first, i)
            except ValueError:
                # No further occurrence of `first`: keep the tail verbatim.
                new_word.extend(word[i:])
                break
            else:
                new_word.extend(word[i:j])
                i = j
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = _get_pairs(word)
    word = ' '.join(word)
    self.cache[token] = word
    return word
def A_(self, text):
    """Tokenize a string into BPE sub-tokens.

    Each regex-matched pre-token is byte-encoded to unicode symbols
    (avoiding BPE control tokens such as spaces) and then split by
    :meth:`bpe`.

    Fixes over the garbled original: the accumulator ``bpe_tokens`` and the
    rebound loop variable ``token`` are actually assigned, and ``self.bpe``
    receives the byte-encoded pre-token rather than the whole input text.
    """
    bpe_tokens = []
    for token in re.findall(self.pat, text):
        token = ''.join(
            self.byte_encoder[b] for b in token.encode('utf-8')
        )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
        bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
    return bpe_tokens
def A_(self, UpperCAmelCase_):
    """Map a token string to its vocabulary id, falling back to the unk-token id."""
    fallback = self.encoder.get(self.unk_token)
    return self.encoder.get(UpperCAmelCase_, fallback)
def A_(self, UpperCAmelCase_):
    """Map a vocabulary id back to its token string (``None`` if unknown)."""
    try:
        return self.decoder[UpperCAmelCase_]
    except KeyError:
        return None
def A_(self, tokens):
    """Convert a sequence of BPE tokens back into a single decoded string.

    The joined symbols are mapped back to raw bytes via ``self.byte_decoder``
    and decoded as UTF-8 using the tokenizer's ``errors`` policy.

    Fix over the garbled original: the joined string is actually bound to
    ``text`` before being used (it was read but never assigned).
    """
    text = ''.join(tokens)
    return bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
def A_(self, save_directory, filename_prefix=None):
    """Write the vocabulary (JSON) and BPE merges files into *save_directory*.

    Returns ``(vocab_file, merge_file)`` paths, or ``None`` if
    *save_directory* is not a directory.

    Fixes over the garbled original: the two parameters were both named
    ``UpperCAmelCase_`` (a SyntaxError) and are restored to the names the
    body's f-strings already reference; the sort key lambda bound its
    argument under one name but read ``kv``; and ``vocab_file``,
    ``merge_file`` and ``index`` are actually assigned.
    """
    if not os.path.isdir(save_directory):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory')
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
    merge_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
    with open(vocab_file, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the byte-level unicode symbols readable.
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
    index = 0
    with open(merge_file, 'w', encoding='utf-8') as writer:
        writer.write('#version: 0.2\n')
        # Merges must be written in rank order; warn if ranks are not consecutive.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                    ' Please check that the tokenizer is not corrupted!')
                index = token_index
            writer.write(' '.join(bpe_tokens) + '\n')
            index += 1
    return vocab_file, merge_file
def A_(self, token_ids_0, token_ids_1=None):
    """Build model inputs with special tokens.

    Single sequence: ``<cls> X <sep>``.
    Sequence pair:   ``<cls> A <sep> <sep> B <sep>``.

    Fix over the garbled original: the two sequence parameters were both
    named ``UpperCAmelCase_`` (a SyntaxError) and the body read the same
    name for both sequences; they are restored to ``token_ids_0`` /
    ``token_ids_1``.
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def A_(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 for special tokens and 0 for sequence tokens.

    Layout matches :meth:`build_inputs_with_special_tokens`:
    ``[1] + 0*len(A) + [1]`` for one sequence, and
    ``[1] + 0*len(A) + [1, 1] + 0*len(B) + [1]`` for a pair.

    Fix over the garbled original: the three parameters were all named
    ``UpperCAmelCase_`` and the super() call passed the same keyword twice
    (both SyntaxErrors); canonical names restored.
    """
    if already_has_special_tokens:
        # Defer to the base class when the ids already contain special tokens.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def A_(self, token_ids_0, token_ids_1=None):
    """Return token-type ids (all zeros — this model does not use them).

    The returned list has the same length as the full input built by
    :meth:`build_inputs_with_special_tokens`.

    Fix over the garbled original: the two sequence parameters were both
    named ``UpperCAmelCase_`` (a SyntaxError) and the pair branch measured
    the first sequence twice; canonical names restored.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def A_(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space to *text* before tokenization.

    A leading space is added when the tokenizer was configured with
    ``add_prefix_space`` (overridable via kwargs) or when the input is
    pre-split into words, unless the text is empty or already starts with
    whitespace.  Returns ``(text, kwargs)`` with ``add_prefix_space``
    popped from kwargs.

    Fix over the garbled original: all three parameters were named
    ``UpperCAmelCase_`` (a SyntaxError); the names the body already read
    are restored.
    """
    add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = ' ' + text
    return (text, kwargs)
def A_(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD,
       pad_to_multiple_of=None, return_attention_mask=None):
    """Pad *encoded_inputs*, additionally padding ``global_attention_mask``.

    Delegates regular padding to the base class, then extends
    ``global_attention_mask`` (if present) to the padded input length using
    ``-1`` on the padded side.

    Fix over the garbled original: the parameters were all named
    ``UpperCAmelCase_`` (a SyntaxError) and ``required_input`` was read but
    never bound; canonical names restored.
    """
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs,
        max_length=max_length,
        padding_strategy=padding_strategy,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
    )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = 'attention_mask' in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` need to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs['global_attention_mask'] = (
                    encoded_inputs['global_attention_mask'] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                    'global_attention_mask'
                ]
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side))
    return encoded_inputs
| 176 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """Test-suite for the GPT-2 tokenizer (slow and fast variants).

    NOTE(review): identifiers in this class look machine-mangled — the base
    class is `__magic_name__`, every local is rebound to `a`, and names read
    later (e.g. `vocab_tokens`, `tokenizer_r`, `out_s`, `tokenizers`) are
    never bound under those names, so several tests cannot run as written.
    Compare against the upstream tests/models/gpt2/test_tokenization_gpt2.py
    before relying on them.
    """

    UpperCamelCase__ = GPTaTokenizer
    UpperCamelCase__ = GPTaTokenizerFast
    UpperCamelCase__ = True
    UpperCamelCase__ = {'''add_prefix_space''': True}
    UpperCamelCase__ = False

    def lowerCamelCase__ ( self :Optional[Any] ):
        '''Write a toy BPE vocab and merges file into the test temp dir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        a = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
            """<|endoftext|>""",
        ]
        # NOTE(review): each statement below rebinds `a`, clobbering the previous value.
        a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        a = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        a = {"""unk_token""": """<unk>"""}
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__magic_name__ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(__magic_name__ ) )

    def lowerCamelCase__ ( self :Dict , **__magic_name__ :List[str] ):
        '''Build a slow GPT-2 tokenizer from the temp dir, injecting the special-token map.'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase__ ( self :List[str] , **__magic_name__ :Optional[Any] ):
        '''Build a fast GPT-2 tokenizer from the temp dir, injecting the special-token map.'''
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )

    def lowerCamelCase__ ( self :Dict , __magic_name__ :List[str] ):
        '''Return an (input, expected output) text pair for round-trip tests.'''
        a = """lower newer"""
        a = """lower newer"""
        return input_text, output_text

    def lowerCamelCase__ ( self :int ):
        '''Tokenize "lower newer" with the toy vocab and check tokens and ids.'''
        a = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        a = """lower newer"""
        a = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        a = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        a = tokens + [tokenizer.unk_token]
        a = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )

    def lowerCamelCase__ ( self :int ):
        '''Check that the slow and fast tokenizers agree on tokens and ids.'''
        if not self.test_rust_tokenizer:
            return
        a = self.get_tokenizer()
        a = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
        a = """lower newer"""
        # Testing tokenization
        a = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
        a = rust_tokenizer.tokenize(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        # Testing conversion to ids without special tokens
        a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
        a = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        # Testing conversion to ids with special tokens
        a = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
        a = tokenizer.encode(__magic_name__ , add_prefix_space=__magic_name__ )
        a = rust_tokenizer.encode(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        # Testing the unknown token
        a = tokens + [rust_tokenizer.unk_token]
        a = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )

    def lowerCamelCase__ ( self :Optional[int] , *__magic_name__ :Tuple , **__magic_name__ :str ):
        '''Intentionally a no-op: this override disables the inherited test.'''
        pass

    def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Union[str, Any]=15 ):
        '''Padding without a pad token must raise for every encode entry point.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
                # Simple input
                a = """This is a simple input"""
                a = ["""This is a simple input 1""", """This is a simple input 2"""]
                a = ("""This is a simple input""", """This is a pair""")
                a = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
                # Simple input
                self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    __magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
                # Pair input
                self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
                # Pair input
                self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    __magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )

    def lowerCamelCase__ ( self :str ):
        '''With an explicit pad token set, padding should work on the slow tokenizer.'''
        a = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
        # Simple input
        a = """This is a simple input"""
        a = ["""This is a simple input looooooooong""", """This is a simple input"""]
        a = ("""This is a simple input""", """This is a pair""")
        a = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]
        a = tokenizer.pad_token_id
        a = tokenizer(__magic_name__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        a = tokenizer(__magic_name__ , padding=__magic_name__ , truncate=__magic_name__ , return_tensors="""np""" )
        a = tokenizer(*__magic_name__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        a = tokenizer(__magic_name__ , padding=__magic_name__ , truncate=__magic_name__ , return_tensors="""np""" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["""input_ids"""] )
        self.assertTrue(0 in out_s["""attention_mask"""] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
        self.assertFalse(0 in out_sa["""attention_mask"""][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
        self.assertTrue(0 in out_sa["""attention_mask"""][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["""input_ids"""] )
        self.assertTrue(0 in out_p["""attention_mask"""] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
        self.assertFalse(0 in out_pa["""attention_mask"""][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
        self.assertTrue(0 in out_pa["""attention_mask"""][1] )

    def lowerCamelCase__ ( self :Union[str, Any] ):
        '''A custom bos token passed at load time must lead every encoded sequence.'''
        a = """$$$"""
        a = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__magic_name__ , add_bos_token=__magic_name__ )
        a = """This is a simple input"""
        a = ["""This is a simple input 1""", """This is a simple input 2"""]
        a = tokenizer.bos_token_id
        a = tokenizer(__magic_name__ )
        a = tokenizer(__magic_name__ )
        self.assertEqual(out_s.input_ids[0] , __magic_name__ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        a = tokenizer.decode(out_s.input_ids )
        a = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , __magic_name__ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def lowerCamelCase__ ( self :Optional[int] ):
        '''Intentionally a no-op: this override disables the inherited test.'''
        pass

    def lowerCamelCase__ ( self :Any ):
        '''special_tokens_mask must line up 1:1 with the encoded ids.'''
        a = [self.get_tokenizer(do_lower_case=__magic_name__ , add_bos_token=__magic_name__ )]
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                a = """Encode this."""
                a = """This one too please."""
                a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                encoded_sequence += tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
                a = tokenizer.encode_plus(
                    __magic_name__ , __magic_name__ , add_special_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , )
                a = encoded_sequence_dict["""input_ids"""]
                a = encoded_sequence_dict["""special_tokens_mask"""]
                self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) )
                a = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(__magic_name__ )
                ]
                a = [x for x in filtered_sequence if x is not None]
                self.assertEqual(__magic_name__ , __magic_name__ )
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    """Integration tests for the facebook/opt-350m tokenizer (needs hub assets).

    NOTE(review): locals are mangled to `a`; names read later (`tokenizer`)
    are never bound under those names — compare with the upstream test file
    before relying on these tests.
    """

    def lowerCamelCase__ ( self :Optional[int] ):
        '''Encoding must be stable across save_pretrained/from_pretrained round-trips.'''
        a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=__magic_name__ )
        a = """A photo of a cat"""
        a = tokenizer.encode(
            __magic_name__ , )
        self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""test_opt""" )
        a = AutoTokenizer.from_pretrained("""./test_opt""" )
        a = tokenizer.encode(
            __magic_name__ , )
        self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )

    def lowerCamelCase__ ( self :Union[str, Any] ):
        '''The slow tokenizer must produce the same ids as the fast one.'''
        a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=__magic_name__ )
        a = """A photo of a cat"""
        a = tokenizer.encode(
            __magic_name__ , )
        # Same as above
        self.assertEqual(__magic_name__ , [2, 250, 1345, 9, 10, 4758] )

    @unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
    def lowerCamelCase__ ( self :str ):
        '''A changed bos token should survive a save/reload (currently skipped).'''
        a = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=__magic_name__ )
        a = """bos"""
        a = tokenizer.get_vocab()["""bos"""]
        a = """A photo of a cat"""
        a = tokenizer.encode(
            __magic_name__ , )
        # We changed the bos token
        self.assertEqual(__magic_name__ , [3_1957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""./tok""" )
        a = AutoTokenizer.from_pretrained("""./tok""" )
        self.assertTrue(tokenizer.is_fast )
        a = tokenizer.encode(
            __magic_name__ , )
        self.assertEqual(__magic_name__ , [3_1957, 250, 1345, 9, 10, 4758] )
| 228 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make framework ops deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast (CPU-sized) tests for CycleDiffusionPipeline.

    NOTE(review): identifiers look machine-mangled — locals are all bound
    to `_A` with throwaway annotations, and names read afterwards (`unet`,
    `scheduler`, `pipe`, `output`, …) are never bound under those names, so
    these tests cannot run as written.  Compare with the upstream diffusers
    test_cycle_diffusion.py.
    """

    __UpperCamelCase : Optional[Any] = CycleDiffusionPipeline
    __UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    __UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    __UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
    __UpperCamelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    __UpperCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def __magic_name__ ( self : Tuple ):
        """Build tiny randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0 )
        _A: Dict = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        _A: Dict = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1_0_0_0 , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
        torch.manual_seed(0 )
        _A: List[str] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        _A: Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        _A: Optional[int] = CLIPTextModel(lowerCAmelCase_ )
        _A: Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _A: Dict = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def __magic_name__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=0 ):
        """Create seeded dummy pipeline inputs (image, prompts, generator, …)."""
        _A: Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
        _A: int = image / 2 + 0.5
        if str(lowerCAmelCase_ ).startswith('''mps''' ):
            _A: Dict = torch.manual_seed(lowerCAmelCase_ )
        else:
            _A: int = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
        _A: List[str] = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs

    def __magic_name__ ( self : Any ):
        """Full fp32 CPU run: output shape and a reference image slice must match."""
        _A: Union[str, Any] = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        _A: int = self.get_dummy_components()
        _A: Dict = CycleDiffusionPipeline(**lowerCAmelCase_ )
        _A: int = pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        _A: Any = self.get_dummy_inputs(lowerCAmelCase_ )
        _A: int = pipe(**lowerCAmelCase_ )
        _A: Tuple = output.images
        _A: List[Any] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 3_2, 3_2, 3)
        _A: int = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def __magic_name__ ( self : Tuple ):
        """Same run with components cast to fp16 on GPU: shape and reference slice must match."""
        _A: Dict = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(lowerCAmelCase_ , '''half''' ):
                _A: Dict = module.half()
        _A: Optional[Any] = CycleDiffusionPipeline(**lowerCAmelCase_ )
        _A: int = pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        _A: Any = self.get_dummy_inputs(lowerCAmelCase_ )
        _A: Optional[int] = pipe(**lowerCAmelCase_ )
        _A: int = output.images
        _A: Union[str, Any] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 3_2, 3_2, 3)
        _A: Optional[Any] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def __magic_name__ ( self : Optional[Any] ):
        """Delegate to the mixin's save/load-local test (skipped on MPS)."""
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''' )
    def __magic_name__ ( self : str ):
        """Batch-vs-single equivalence test — skipped: pipeline is non-deterministic."""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def __magic_name__ ( self : str ):
        """Delegate to the mixin's dict/tuple output-equivalence test (skipped on MPS)."""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def __magic_name__ ( self : str ):
        """Delegate to the mixin's optional-components save/load test (skipped on MPS)."""
        return super().test_save_load_optional_components()

    @skip_mps
    def __magic_name__ ( self : List[Any] ):
        """Delegate to the mixin's attention-slicing forward-pass test (skipped on MPS)."""
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for CycleDiffusionPipeline against stored references.

    NOTE(review): locals are mangled to `_A`; names read afterwards
    (`init_image`, `pipe`, `output`, `expected_image`) are never bound under
    those names — compare with the upstream diffusers test file.
    """

    def __magic_name__ ( self : int ):
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : int ):
        """fp16 end-to-end run compared against a stored reference image."""
        _A: Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        _A: List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        _A: Any = init_image.resize((5_1_2, 5_1_2) )
        _A: str = '''CompVis/stable-diffusion-v1-4'''
        _A: Dict = DDIMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='''scheduler''' )
        _A: int = CycleDiffusionPipeline.from_pretrained(
            lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa , revision='''fp16''' )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        _A: Tuple = '''A black colored car'''
        _A: Any = '''A blue colored car'''
        _A: Tuple = torch.manual_seed(0 )
        _A: List[str] = pipe(
            prompt=lowerCAmelCase_ , source_prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase_ , output_type='''np''' , )
        _A: List[str] = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1

    def __magic_name__ ( self : List[str] ):
        """fp32 end-to-end run compared against a stored reference image."""
        _A: List[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        _A: Any = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        _A: Optional[int] = init_image.resize((5_1_2, 5_1_2) )
        _A: Union[str, Any] = '''CompVis/stable-diffusion-v1-4'''
        _A: List[str] = DDIMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='''scheduler''' )
        _A: Dict = CycleDiffusionPipeline.from_pretrained(lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        _A: Optional[int] = '''A black colored car'''
        _A: Union[str, Any] = '''A blue colored car'''
        _A: List[str] = torch.manual_seed(0 )
        _A: Optional[int] = pipe(
            prompt=lowerCAmelCase_ , source_prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase_ , output_type='''np''' , )
        _A: int = output.images
        assert np.abs(image - expected_image ).max() < 2e-2
| 301 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase__(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated annealing over a SearchProblem-like object.

    Starting from `search_prob`, repeatedly picks a random neighbor; an
    improving move is always taken, a worsening move is taken with
    probability exp(change / T).  The temperature T decays geometrically by
    `rate_of_decrease` each iteration and the search stops when T drops
    below `threshold_temp` or no acceptable neighbor exists.  Neighbors
    outside the [min_x, max_x] x [min_y, max_y] box are skipped.  Returns
    the best-scoring state visited.

    Fixes over the garbled original: the parameters were all named `a`
    (a SyntaxError) and every state variable was assigned to a placeholder
    while being read under its real name; canonical names restored.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
if __name__ == "__main__":
    # NOTE(review): the annealing routine in this module is named
    # `lowerCamelCase__`; the garbled original called an undefined
    # `simulated_annealing`, defined both objective functions under one
    # clashing name with duplicated parameters, and never bound the
    # `prob`/`local_min` names its prints read.  All of that is repaired
    # below without changing any printed text.

    def test_fa(x, y):
        """First test objective: f(x, y) = x^2 + y^2 (convex bowl)."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = lowerCamelCase__(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = lowerCamelCase__(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa(x, y):  # noqa: F811 — deliberately rebinds for the second experiment
        """Second test objective: f(x, y) = 3*x^2 - 6*y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = lowerCamelCase__(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = lowerCamelCase__(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        F"""{local_min.score()}"""
    )
| 301 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE:
    """Helper that builds tiny ResNet configs/inputs for the TF model tests.

    NOTE(review): identifiers look machine-mangled — `__init__` declares the
    same parameter name (`__snake_case`) many times (a SyntaxError) and
    assigns incoming values to a local `UpperCAmelCase` instead of `self.*`,
    so this class cannot work as written.  Compare with the upstream
    `TFResNetModelTester` before relying on it.
    """

    def __init__( self : List[Any] , __snake_case : Any , __snake_case : List[str]=3 , __snake_case : str=32 , __snake_case : Tuple=3 , __snake_case : Dict=10 , __snake_case : List[Any]=[10, 20, 30, 40] , __snake_case : List[Any]=[1, 1, 2, 1] , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : Any="relu" , __snake_case : Union[str, Any]=3 , __snake_case : List[str]=None , ) -> List[str]:
        # Record test hyper-parameters (batch size, image size, depths, …).
        UpperCAmelCase : int = parent
        UpperCAmelCase : List[Any] = batch_size
        UpperCAmelCase : List[str] = image_size
        UpperCAmelCase : Optional[int] = num_channels
        UpperCAmelCase : List[str] = embeddings_size
        UpperCAmelCase : List[str] = hidden_sizes
        UpperCAmelCase : int = depths
        UpperCAmelCase : Union[str, Any] = is_training
        UpperCAmelCase : Dict = use_labels
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : Union[str, Any] = num_labels
        UpperCAmelCase : str = scope
        UpperCAmelCase : str = len(__snake_case )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """Build (config, pixel_values, labels) for a test forward pass."""
        UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase : List[str] = None
        if self.use_labels:
            UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def A ( self : int ) -> Tuple:
        """Build a small ResNetConfig from the stored hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def A ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[Any] ) -> List[Any]:
        """Run the bare TFResNetModel and check the hidden-state shape."""
        UpperCAmelCase : int = TFResNetModel(config=__snake_case )
        UpperCAmelCase : Tuple = model(__snake_case )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A ( self : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str ) -> str:
        """Run TFResNetForImageClassification and check the logits shape."""
        UpperCAmelCase : List[Any] = self.num_labels
        UpperCAmelCase : Tuple = TFResNetForImageClassification(__snake_case )
        UpperCAmelCase : Tuple = model(__snake_case , labels=__snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : str ) -> Tuple:
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        UpperCAmelCase : Dict = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
        UpperCAmelCase : Any = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common-API tests for the TF ResNet models. ResNet does not use
    input_ids/inputs_embeds/attention_mask, so several common tests are
    skipped or overridden.

    NOTE(review): the mangled original gave all three classes in this file the
    same name, so unittest could only discover the last one — restored a
    distinct name. The mangled bases ``A__`` are resolved to the mixins
    imported at the top of the file.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # NOTE(review): the original assigned five False flags to one mangled name;
    # attribute names below are reconstructed from the upstream test suite —
    # confirm against it.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        # NOTE(review): ConfigTester/ResNetConfig must be imported earlier in
        # the file (not visible in this chunk) — confirm.
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Covered by the ConfigTester calls above.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # +1 for the initial embedding output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    NOTE(review): the mangled original was defined under a different name,
    while the integration test below calls ``prepare_img()`` — restored the
    referenced name.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run a pretrained TF ResNet on a real image and
    compare the logits against pinned reference values.

    NOTE(review): renamed from the mangled original, which shared its name
    with the two classes above (only the last was discoverable by unittest).
    """

    @cached_property
    def default_image_processor(self):
        # Property name is grounded by the `self.default_image_processor`
        # access in the test below.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 23 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
    """
    ALBERT tokenizer tests (slow and fast implementations), driven by the
    shared tokenizer test mixin.

    NOTE(review): this file's identifiers are mechanically mangled — the base
    ``__magic_name__`` is presumably ``TokenizerTesterMixin`` (imported
    above), the five class attributes are all bound to one name ``lowercase``
    (originally distinct mixin settings such as the tokenizer classes and
    feature flags, of which only the last assignment survives), every test
    method is named ``_lowercase`` (later definitions shadow earlier ones at
    class-creation time), and local assignments bind ``UpperCAmelCase`` while
    later lines read the originally intended names (``tokenizer``,
    ``vocab_keys``, ...). Code is kept byte-identical; documentation only.
    """
    lowercase = AlbertTokenizer
    lowercase = AlbertTokenizerFast
    lowercase = True
    lowercase = True
    lowercase = True

    def _lowercase( self ) -> str:
        """setUp: build a tokenizer from the SentencePiece fixture and save it
        to the mixin's tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): ``A`` is unresolved here — the fixture path is bound to
        # the module-level ``a`` above; confirm against the original file.
        UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
        tokenizer.save_pretrained(self.tmpdirname )

    def _lowercase( self , A ) -> int:
        """Return an (input_text, output_text) pair for round-trip tests."""
        UpperCAmelCase : Optional[int] = """this is a test"""
        UpperCAmelCase : Dict = """this is a test"""
        return input_text, output_text

    def _lowercase( self ) -> int:
        """``<pad>`` must map to id 0 and back."""
        UpperCAmelCase : Tuple = """<pad>"""
        UpperCAmelCase : Optional[int] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )

    def _lowercase( self ) -> Any:
        """Spot-check vocabulary contents and its size (30000 entries)."""
        UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
        self.assertEqual(len(A ) , 30000 )

    def _lowercase( self ) -> Tuple:
        """The ``vocab_size`` property matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size , 30000 )

    def _lowercase( self ) -> Union[str, Any]:
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase : int = self.get_tokenizer()
        UpperCAmelCase : List[str] = self.get_rust_tokenizer()
        UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
        UpperCAmelCase : str = tokenizer.tokenize(A )
        UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
        self.assertListEqual(A , A )
        UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
        UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
        self.assertListEqual(A , A )
        UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
        UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
        self.assertListEqual(A , A )

    def _lowercase( self ) -> Any:
        """Detailed tokenization checks (pieces, ids, id->token) with accents kept."""
        UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
        UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
        UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
        self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
        self.assertListEqual(
            A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )

    def _lowercase( self ) -> Optional[Any]:
        """``build_inputs_with_special_tokens`` wraps sequences in CLS/SEP ids."""
        UpperCAmelCase : str = AlbertTokenizer(A )
        UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
        UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
        UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
        UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def _lowercase( self ) -> Dict:
        """Integration check against pinned encodings from ``albert-base-v2``."""
        # fmt: off
        UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_a : Tuple= logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias of :class:`VideoMAEImageProcessor`.

    Emits a deprecation warning on construction and otherwise behaves exactly
    like the image processor. Class name and base are restored from the
    warning message and the import above; the mangled original reused ``_A``
    for both ``*args`` and ``**kwargs`` (a SyntaxError) and passed the
    positional args where ``warnings.warn`` expects the warning category.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 357 | """simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Annotation-only marker types: "a dataclass instance" and "a dataclass
# *class*". The parser class below references them as DataClass/DataClassType
# in its (class-level, eagerly evaluated) annotations; the mangled original
# bound both to a single throwaway name `_a`, leaving those references
# undefined at import time.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse a truthy/falsy command-line string into a ``bool``.

    ``bool`` values pass through unchanged; the usual yes/no spellings are
    accepted case-insensitively. Anything else raises ``ArgumentTypeError``
    so argparse reports a clean error.

    NOTE(review): renamed from a mangled, immediately-shadowed identifier —
    the parser class below calls it as ``string_to_bool``. The original also
    tested ``isinstance(v, v)``, which raises ``TypeError`` for any string;
    fixed to ``isinstance(v, bool)``.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return an argparse ``type`` callable mapping each choice's string form
    back to the original choice object.

    Strings that match no choice pass through unchanged; argparse's own
    ``choices`` validation then rejects them.

    NOTE(review): renamed from a mangled, immediately-shadowed identifier —
    the parser class below calls it as ``make_choice_type_function``.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def __UpperCAmelCase(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """``dataclasses.field`` wrapper that stores ``aliases`` and ``help`` in the
    field's ``metadata`` for the argument parser below to consume.

    NOTE(review): the mangled original reused one name for every keyword-only
    parameter (invalid Python) and bound the metadata entries to throwaway
    locals; parameter names are reconstructed from the annotations and from
    how ``_parse_dataclass_field`` reads the metadata ('aliases', 'help').
    The public (mangled) function name is preserved.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class UpperCamelCase ( lowercase ):
    """
    Argument parser that derives its command-line options from dataclass field
    definitions (type hints, defaults and ``metadata``).

    NOTE(review): this file's identifiers are mechanically mangled — the base
    class ``lowercase`` is presumably ``ArgumentParser`` (imported above),
    several signatures reuse ``_A`` for every parameter (invalid Python as
    written), and most local assignments bind ``__snake_case`` while later
    lines read the originally intended names (``kwargs``, ``field``,
    ``parser``, ...). Code is kept byte-identical; documentation only.
    """

    # The dataclass types this parser pulls its arguments from.
    UpperCAmelCase : Iterable[DataClassType]

    def __init__(self : Tuple , _A : Union[DataClassType, Iterable[DataClassType]] , **_A : int) -> int:
        """Accept one dataclass type or an iterable of them, plus the usual
        ``argparse.ArgumentParser`` keyword arguments."""
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            __snake_case : Union[str, Any] = ArgumentDefaultsHelpFormatter
        super().__init__(**_A)
        if dataclasses.is_dataclass(_A):
            __snake_case : Optional[int] = [dataclass_types]
        __snake_case : Dict = list(_A)
        # Register one argparse argument per field of every dataclass type.
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(_A)

    @staticmethod
    def _lowercase (_A : ArgumentParser , _A : dataclasses.Field) -> Tuple:
        """Translate a single dataclass field into a ``parser.add_argument``
        call (handles Optional/Union, Literal/Enum choices, bool flags with a
        ``--no_*`` complement, and ``List[X]`` fields)."""
        __snake_case : Union[str, Any] = f"--{field.name}"
        __snake_case : Optional[int] = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , _A):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default')
        __snake_case : Any = kwargs.pop('aliases' , [])
        if isinstance(_A , _A):
            __snake_case : Optional[Any] = [aliases]
        __snake_case : Tuple = getattr(field.type , '__origin__' , field.type)
        # Unwrap Optional[X]/Union: argparse supports only one type per argument.
        if origin_type is Union or (hasattr(_A , 'UnionType') and isinstance(_A , types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(_A) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f" Problem encountered in field '{field.name}'.")
            if type(_A) not in field.type.__args__:
                # filter `str` in Union
                __snake_case : Tuple = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                __snake_case : Optional[int] = getattr(field.type , '__origin__' , field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                __snake_case : Optional[Any] = (
                    field.type.__args__[0] if isinstance(_A , field.type.__args__[1]) else field.type.__args__[1]
                )
                __snake_case : Tuple = getattr(field.type , '__origin__' , field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        __snake_case : Optional[int] = {}
        if origin_type is Literal or (isinstance(field.type , _A) and issubclass(field.type , _A)):
            if origin_type is Literal:
                __snake_case : Tuple = field.type.__args__
            else:
                __snake_case : Dict = [x.value for x in field.type]
            __snake_case : Dict = make_choice_type_function(kwargs['choices'])
            if field.default is not dataclasses.MISSING:
                __snake_case : Dict = field.default
            else:
                __snake_case : Union[str, Any] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            __snake_case : Tuple = copy(_A)
            # Hack because type=bool in argparse does not behave as we want.
            __snake_case : Dict = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                __snake_case : str = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                __snake_case : Any = default
                # This tells argparse we accept 0 or 1 value after --field_name
                __snake_case : Dict = '?'
                # This is the value that will get picked if we do --field_name (without value)
                __snake_case : List[str] = True
        elif isclass(_A) and issubclass(_A , _A):
            # List[X] fields: accept one or more values.
            __snake_case : str = field.type.__args__[0]
            __snake_case : Any = '+'
            if field.default_factory is not dataclasses.MISSING:
                __snake_case : List[str] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                __snake_case : Any = True
        else:
            __snake_case : Tuple = field.type
            if field.default is not dataclasses.MISSING:
                __snake_case : Optional[int] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                __snake_case : List[Any] = field.default_factory()
            else:
                __snake_case : Union[str, Any] = True
        parser.add_argument(_A , *_A , **_A)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            __snake_case : List[str] = False
            parser.add_argument(f"--no_{field.name}" , action='store_false' , dest=field.name , **_A)

    def _lowercase (self : List[Any] , _A : DataClassType) -> Optional[int]:
        """Add all init-able fields of one dataclass type to the parser (in an
        argument group when the dataclass defines ``_argument_group_name``)."""
        if hasattr(_A , '_argument_group_name'):
            __snake_case : Union[str, Any] = self.add_argument_group(dtype._argument_group_name)
        else:
            __snake_case : int = self
        try:
            __snake_case : Dict[str, type] = get_type_hints(_A)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_A):
                __snake_case : Union[str, Any] = '.'.join(map(_A , sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.') from ex
            raise
        for field in dataclasses.fields(_A):
            if not field.init:
                continue
            __snake_case : Optional[Any] = type_hints[field.name]
            self._parse_dataclass_field(_A , _A)

    def _lowercase (self : Union[str, Any] , _A : List[Any]=None , _A : Optional[Any]=False , _A : int=True , _A : List[Any]=None , _A : str=None , ) -> Tuple[DataClass, ...]:
        """Parse command-line args (optionally merged with ``.args`` files) into
        one instance per dataclass type; remaining strings are returned when
        requested, otherwise their presence raises ``ValueError``."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            __snake_case : Any = []
            if args_filename:
                args_files.append(Path(_A))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                __snake_case : int = ArgumentParser()
                args_file_parser.add_argument(_A , type=_A , action='append')
                # Use only remaining args for further parsing (remove the args_file_flag)
                __snake_case , __snake_case : int = args_file_parser.parse_known_args(args=_A)
                __snake_case : int = vars(_A).get(args_file_flag.lstrip('-') , _A)
                if cmd_args_file_paths:
                    args_files.extend([Path(_A) for p in cmd_args_file_paths])
            __snake_case : Optional[int] = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            __snake_case : List[str] = file_args + args if args is not None else file_args + sys.argv[1:]
        __snake_case , __snake_case : Tuple = self.parse_known_args(args=_A)
        __snake_case : Dict = []
        for dtype in self.dataclass_types:
            # Pull out exactly the namespace entries belonging to this dataclass.
            __snake_case : List[Any] = {f.name for f in dataclasses.fields(_A) if f.init}
            __snake_case : List[str] = {k: v for k, v in vars(_A).items() if k in keys}
            for k in keys:
                delattr(_A , _A)
            __snake_case : List[str] = dtype(**_A)
            outputs.append(_A)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(_A)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)

    def _lowercase (self : Tuple , _A : Dict[str, Any] , _A : bool = False) -> Tuple[DataClass, ...]:
        """Instantiate the dataclass types from an already-parsed dict; unused
        keys raise ``ValueError`` unless ``allow_extra_keys``."""
        __snake_case : List[Any] = set(args.keys())
        __snake_case : Dict = []
        for dtype in self.dataclass_types:
            __snake_case : List[str] = {f.name for f in dataclasses.fields(_A) if f.init}
            __snake_case : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            __snake_case : List[str] = dtype(**_A)
            outputs.append(_A)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(_A)}")
        return tuple(_A)

    def _lowercase (self : int , _A : str , _A : bool = False) -> Tuple[DataClass, ...]:
        """Load a JSON file and feed its contents through ``parse_dict``."""
        with open(Path(_A) , encoding='utf-8') as open_json_file:
            __snake_case : int = json.loads(open_json_file.read())
        __snake_case : Optional[int] = self.parse_dict(_A , allow_extra_keys=_A)
        return tuple(_A)

    def _lowercase (self : List[str] , _A : str , _A : bool = False) -> Tuple[DataClass, ...]:
        """Load a YAML file and feed its contents through ``parse_dict``."""
        __snake_case : Dict = self.parse_dict(yaml.safe_load(Path(_A).read_text()) , allow_extra_keys=_A)
        return tuple(_A)
return tuple(_A)
| 95 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '▁'
_a = {'vocab_file': 'sentencepiece.bpe.model'}
_a = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
_a = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class A_ (lowercase__ ):
    '''
    SentencePiece-based tokenizer for XLM-RoBERTa.

    Token ids mimic fairseq's vocabulary layout: ids 0-3 are
    <s>/<pad>/</s>/<unk>, every SentencePiece id is shifted by
    ``fairseq_offset`` (1), and <mask> is appended as the final id.

    NOTE(review): identifiers in this file are mechanically mangled — the base
    class ``lowercase__`` is presumably ``PreTrainedTokenizer`` (imported
    above), several signatures reuse ``lowercase_`` for every parameter
    (invalid Python as written), and local assignments bind ``UpperCAmelCase_``
    while later lines read the intended names. Code is kept byte-identical;
    documentation only.
    '''
    SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Dict = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_ = None , **lowercase_ , ):
        """Load the SentencePiece model and set up the fairseq-compatible
        special-token id mapping."""
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
        UpperCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
        UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase_ ) )
        UpperCAmelCase_ : Optional[int] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCAmelCase_ : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCAmelCase_ : int = 1
        UpperCAmelCase_ : str = len(self.sp_model ) + self.fairseq_offset
        UpperCAmelCase_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor; keep its serialized proto."""
        UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
        UpperCAmelCase_ : List[str] = None
        UpperCAmelCase_ : int = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , lowercase_ ):
        """Restore state and rebuild the SentencePiece processor from the proto."""
        UpperCAmelCase_ : int = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            UpperCAmelCase_ : str = {}
        UpperCAmelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """Build model inputs: ``<s> A </s>`` for one sequence and
        ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ : Dict = [self.cls_token_id]
        UpperCAmelCase_ : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowercase_ )) + [1]
        return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """XLM-RoBERTa does not use token type ids; return all zeros."""
        UpperCAmelCase_ : Any = [self.sep_token_id]
        UpperCAmelCase_ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def UpperCamelCase__ ( self ):
        """Vocabulary size including the fairseq offset and the <mask> token."""
        return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token

    def UpperCamelCase__ ( self ):
        """Return the full token->id vocabulary dict (including added tokens)."""
        UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCamelCase__ ( self , lowercase_ ):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(lowercase_ , out_type=lowercase_ )

    def UpperCamelCase__ ( self , lowercase_ ):
        """Convert a token to its fairseq-aligned id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCAmelCase_ : str = self.sp_model.PieceToId(lowercase_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def UpperCamelCase__ ( self , lowercase_ ):
        """Convert a fairseq-aligned id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def UpperCamelCase__ ( self , lowercase_ ):
        """Join pieces into a string, replacing the SentencePiece underline
        marker with spaces."""
        UpperCAmelCase_ : Dict = "".join(lowercase_ ).replace(lowercase_ , " " ).strip()
        return out_string

    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """Save the SentencePiece vocabulary: copy the original model file when
        present, otherwise dump the serialized proto."""
        if not os.path.isdir(lowercase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ : List[Any] = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase_ , "wb" ) as fi:
                UpperCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(lowercase_ )
        return (out_vocab_file,)
| 61 |
"""simple docstring"""
import os
# Roman symbol values. The functions below read this as ``SYMBOLS``; the
# mangled original bound it to a throwaway name, raising NameError at runtime.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value.

    A symbol smaller than its right-hand neighbour is subtracted (``IV`` -> 4),
    otherwise added; the last symbol is always added. Renamed from a mangled
    identifier to match the call in ``solution`` below. Returns 0 for an
    empty string instead of raising.
    """
    if not numerals:
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The final symbol is never subtracted.
    total_value += SYMBOLS[numerals[index]]
    return total_value
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = ""
UpperCAmelCase_ : Any = num // 1000
numerals += m_count * "M"
num %= 1000
UpperCAmelCase_ : Any = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCAmelCase_ : str = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __a ( __lowerCamelCase = "/p089_roman.txt" ):
    """Project Euler 89 driver: total characters saved by rewriting every
    roman numeral from the data file in its minimal form.

    NOTE(review): the identifiers in this body look machine-mangled — every
    local is bound to `UpperCAmelCase_`, yet the code reads `savings` and
    `lines` and calls `parse_roman_numerals` / `generate_roman_numerals` /
    `roman_numerals_filename`, none of which exist in this file (both
    helpers above are named `__a` and shadow each other). As written this
    raises NameError; code left byte-identical.
    """
    UpperCAmelCase_ : int = 0
    with open(os.path.dirname(__lowerCamelCase ) + roman_numerals_filename ) as filea:
        UpperCAmelCase_ : Optional[Any] = filea.readlines()
    for line in lines:
        UpperCAmelCase_ : Tuple = line.strip()
        UpperCAmelCase_ : Optional[Any] = parse_roman_numerals(__lowerCamelCase )
        UpperCAmelCase_ : Tuple = generate_roman_numerals(__lowerCamelCase )
        savings += len(__lowerCamelCase ) - len(__lowerCamelCase )
    return savings


# Script entry point; `solution` is likewise undefined here (see note above).
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 61 | 1 |
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 ,n + 1 ) )
if __name__ == "__main__":
print(solution())
| 15 |
import argparse
import os
import re
import packaging.version
# The four module-level constants below were all bound to the same name
# `a__` in the original, each assignment clobbering the previous one, while
# the functions in this file read `REPLACE_PATTERNS`, `REPLACE_FILES`, the
# examples path and the README path — restore distinct, meaningful names.
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
# Preserve the original module-level name; its last-assigned value was the
# README path.
a__ = README_FILE
def __UpperCAmelCase ( fname , version , pattern ) -> None:
    """Rewrite the version string inside *fname* using the regex/template
    pair registered for *pattern* in REPLACE_PATTERNS.

    The original signature declared `__a` three times (a SyntaxError) while
    the body read `pattern`; meaningful parameter names are restored.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def __UpperCAmelCase ( version ) -> None:
    """Update the version in every example script under the examples folder.

    The original walked the *version* argument instead of the examples path
    and forwarded mangled arguments to the per-file updater.
    """
    # "examples/" mirrors the module-level examples-path constant declared
    # at the top of this file.
    for folder, directories, fnames in os.walk("examples/"):
        # Removing some of the folders with non-actively maintained examples
        # from the walk.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                # NOTE(review): `update_version_in_file` is the canonical
                # name of the single-file updater defined above (mangled in
                # this file) — confirm it resolves at runtime.
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def __UpperCAmelCase ( version , patch = False ) -> None:
    """Update the version everywhere it is hard-coded (init, setup, and the
    example scripts unless this is a patch release).

    The original declared the parameter name `__a` twice (a SyntaxError)
    and forwarded mangled arguments; meaningful names are restored.
    """
    for pattern, fname in REPLACE_FILES.items():
        # NOTE(review): `update_version_in_file` / `update_version_in_examples`
        # are the canonical names of the helpers defined above (mangled in
        # this file) — confirm they resolve at runtime.
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def __UpperCAmelCase ( ) -> None:
    """Point the model list in the README at the stable doc pages.

    Fixes three mangled references in the original: the README path was
    read from an undefined `__a`, the replaced line was assigned to a
    throwaway local instead of back into ``lines[index]`` (so the rewrite
    never took effect), and ``writelines`` was given an undefined name.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    # "README.md" mirrors the module-level README constant declared at the
    # top of this file.
    with open("README.md", "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open("README.md", "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def __UpperCAmelCase ( ):
    """Read and parse the current version from the library ``__init__``.

    The original searched and parsed an undefined ``__a`` instead of the
    file contents / the matched version string.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def __UpperCAmelCase ( patch = False ) -> None:
    """Interactive pre-release step: bump the version and clean the README.

    The original took a parameter named ``__a`` but read ``patch``, and
    bound every local to ``_a`` while reading ``default_version`` /
    ``version``; the names the body actually reads are restored.
    """
    # NOTE(review): `get_version`, `global_version_update` and
    # `clean_main_ref_in_model_list` are the canonical names of helpers
    # defined above (mangled in this file) — confirm they resolve at runtime.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> None:
    """Interactive post-release step: move to the next ``.dev0`` version.

    The original bound every local to ``_a`` while reading
    ``current_version`` / ``dev_version`` / ``version``; the names the body
    actually reads are restored.
    """
    # NOTE(review): `get_version`, `global_version_update` and
    # `clean_main_ref_in_model_list` are the canonical names of helpers
    # defined above (mangled in this file) — confirm they resolve at runtime.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
# Command-line entry point: `--post_release` switches between the pre- and
# post-release flows; `--patch` marks a patch release.
# NOTE(review): names here look machine-mangled — the parser and parsed args
# are both bound to `a__` while the code reads `parser` / `args`, and
# `pre_release_work` / `post_release_work` are not defined under those names
# in this file (the helpers above all share `__UpperCAmelCase` and shadow
# each other). As written this raises NameError; code left byte-identical.
if __name__ == "__main__":
    a__ = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    a__ = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 15 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
    """Fast CPU test-suite for StableDiffusionPanoramaPipeline using tiny
    UNet/VAE/CLIP components and pinned output slices."""

    # NOTE(review): identifiers in this class look machine-mangled — both
    # base classes are the undefined name `_lowercase` (presumably the
    # PipelineLatentTesterMixin / PipelineTesterMixin imported above), every
    # class attribute rebinds `_lowerCamelCase` so only the last assignment
    # survives, every method shares the name `_SCREAMING_SNAKE_CASE`, locals
    # are all bound to `A` while later lines read the intended names
    # (`unet`, `sd_pipe`, `image_slice`, ...), and one signature repeats the
    # parameter `A_` (a SyntaxError). Code left byte-identical.
    _lowerCamelCase: Tuple = StableDiffusionPanoramaPipeline
    _lowerCamelCase: int = TEXT_TO_IMAGE_PARAMS
    _lowerCamelCase: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowerCamelCase: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _lowerCamelCase: List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
        # Build tiny UNet/VAE/CLIP components so the pipeline runs fast on CPU.
        torch.manual_seed(0 )
        A = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
        A = DDIMScheduler()
        torch.manual_seed(0 )
        A = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        A = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        A = CLIPTextModel(A_ )
        A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        A = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[Any]=0 ) -> List[str]:
        # Deterministic generator + minimal inference settings.
        A = torch.manual_seed(A_ )
        A = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        # Default end-to-end run; checks a pinned 3x3 output slice.
        A = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        A = self.get_dummy_components()
        A = StableDiffusionPanoramaPipeline(**A_ )
        A = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = sd_pipe(**A_ ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
        # Batch-consistency check delegated to the mixin.
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        # Single-vs-batched equality check delegated to the mixin.
        super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25e-3 )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        # Run with a negative prompt; checks a pinned 3x3 output slice.
        A = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        A = self.get_dummy_components()
        A = StableDiffusionPanoramaPipeline(**A_ )
        A = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = 'french fries'
        A = sd_pipe(**A_ ,negative_prompt=A_ )
        A = output.images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        # Run with view_batch_size=2 (panorama window batching); pinned slice.
        A = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        A = self.get_dummy_components()
        A = StableDiffusionPanoramaPipeline(**A_ )
        A = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = sd_pipe(**A_ ,view_batch_size=2 )
        A = output.images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
        # Euler-ancestral scheduler variant; pinned slice.
        A = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        A = self.get_dummy_components()
        A = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' )
        A = StableDiffusionPanoramaPipeline(**A_ )
        A = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = sd_pipe(**A_ ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
        # PNDM scheduler variant (PRK steps skipped); pinned slice.
        A = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        A = self.get_dummy_components()
        A = PNDMScheduler(
            beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,skip_prk_steps=A_ )
        A = StableDiffusionPanoramaPipeline(**A_ )
        A = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        A = self.get_dummy_inputs(A_ )
        A = sd_pipe(**A_ ).images
        A = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionPanoramaPipeline against
    stabilityai/stable-diffusion-2-base."""

    # NOTE(review): same machine-mangled naming as the class above — every
    # method shares the name `_SCREAMING_SNAKE_CASE`, locals are all bound to
    # `A` while later lines read the intended names (`pipe`, `inputs`,
    # `image_slice`, ...), and two signatures repeat the parameter `A_` (a
    # SyntaxError). Code left byte-identical.
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any]=0 ) -> int:
        # Deterministic full-size inference inputs.
        A = torch.manual_seed(A_ )
        A = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
        # DDIM run; checks the 512x2048 panorama shape and a pinned slice.
        A = 'stabilityai/stable-diffusion-2-base'
        A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
        A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        A = self.get_inputs()
        A = pipe(**A_ ).images
        A = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        A = np.array(
            [
                0.36_96_83_92,
                0.27_02_53_72,
                0.32_44_67_66,
                0.28_37_93_87,
                0.36_36_32_74,
                0.30_73_33_47,
                0.27_10_00_27,
                0.27_05_41_25,
                0.25_53_60_96,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        # LMS scheduler run; the expected slice is all zeros here.
        A = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base' ,safety_checker=A_ )
        A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        A = self.get_inputs()
        A = pipe(**A_ ).images
        A = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        A = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
        # Verifies the step callback fires each step and that intermediate
        # latents match pinned values at steps 1 and 2.
        A = 0

        def callback_fn(A_ : int ,A_ : int ,A_ : torch.FloatTensor ) -> None:
            A = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                A = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                A = latents[0, -3:, -3:, -1]
                A = np.array(
                    [
                        0.18_68_18_69,
                        0.33_90_78_16,
                        0.5_36_12_76,
                        0.14_43_28_65,
                        -0.02_85_66_11,
                        -0.73_94_11_23,
                        0.23_39_79_87,
                        0.47_32_26_82,
                        -0.37_82_31_64,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                A = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                A = latents[0, -3:, -3:, -1]
                A = np.array(
                    [
                        0.18_53_96_45,
                        0.33_98_72_48,
                        0.5_37_85_59,
                        0.14_43_71_42,
                        -0.02_45_52_61,
                        -0.7_33_83_17,
                        0.23_99_07_55,
                        0.47_35_62_72,
                        -0.3_78_65_05,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        A = False
        A = 'stabilityai/stable-diffusion-2-base'
        A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
        A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
        A = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        A = self.get_inputs()
        pipe(**A_ ,callback=A_ ,callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
        # Sequential CPU offload must keep peak GPU memory under ~5.5 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        A = 'stabilityai/stable-diffusion-2-base'
        A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
        A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
        A = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        A = self.get_inputs()
        A = pipe(**A_ )
        A = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): the logger and the pretrained-config map below are both
# bound to the same name `_lowercase`; the second assignment clobbers the
# first, so the logger is unreachable after import. Code left byte-identical.
_lowercase = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config file.
_lowercase = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowerCAmelCase_ ( PretrainedConfig ):
    """Configuration class for GPTBigCode models (e.g. santacoder).

    The original inherited from `_lowercase` (the URL map above — a dict,
    not a class; `PretrainedConfig` imported at the top is the conventional
    base), bound all three class-level attributes to `_lowerCamelCase`, and
    named every `__init__` parameter `A_` (a SyntaxError). The parameter
    names are recovered from the attribute assignments in the original body;
    the class-level attribute names follow the standard transformers config
    convention — confirm against the upstream file.
    """

    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=5_0257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class a ( BaseOutput ):
    """Output of the Flax ControlNet: one residual per down block plus the
    mid-block residual.

    The original inherited from an undefined `__lowerCamelCase` (the
    imported `BaseOutput` is the conventional base for output dataclasses
    here) and bound both fields to the same name; the field names are
    recovered from the keyword arguments used at the return site of
    `__call__` further down this file.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


# The return site in __call__ constructs `FlaxControlNetOutput`; expose the
# class under that name as well.
FlaxControlNetOutput = a
class a ( nn.Module ):
    """Embeds the conditioning image (e.g. canny edges) into the latent
    space consumed by the ControlNet.

    The original was mangled: the three module attributes were all bound to
    `__lowerCAmelCase` while the methods read
    `self.conditioning_embedding_channels`, `self.block_out_channels` and
    `self.dtype`; the weight-building method was not named `setup` (required
    by flax) and never assigned its submodules to `self`. The names the
    methods actually read are restored; `jnp.floataa` is the mangled
    spelling of `jnp.float32`.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        # Input projection into the first channel width.
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # Two convs per stage: a same-size conv, then a stride-2 downsampler.
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            blocks.append(
                nn.Conv(
                    channel_in,
                    kernel_size=(3, 3),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
            blocks.append(
                nn.Conv(
                    channel_out,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
        self.blocks = blocks
        # Zero-initialized output projection (standard ControlNet setup so the
        # conditioning starts as a no-op).
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class a ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
    """Flax ControlNet model: a copy of the UNet encoder that consumes an
    extra conditioning image and returns residuals to be added to a UNet's
    down-block and mid-block outputs."""

    # NOTE(review): identifiers here look machine-mangled — the two extra
    # base classes are the undefined name `__lowerCamelCase` (presumably the
    # FlaxModelMixin / ConfigMixin imported above), every config attribute
    # rebinds `__lowerCAmelCase` so only the last assignment survives (yet
    # the methods read `self.sample_size`, `self.in_channels`, ... which are
    # never declared), the weight-init and setup methods are both named
    # `__lowerCamelCase` (flax requires `setup`), `setup` never assigns its
    # submodules to `self`, and `__call__` repeats the parameter name
    # `__lowercase` six times (a SyntaxError). Code left byte-identical.
    __lowerCAmelCase : List[Any] = 32
    __lowerCAmelCase : Any = 4
    __lowerCAmelCase : Dict = (
        """CrossAttnDownBlock2D""",
        """CrossAttnDownBlock2D""",
        """CrossAttnDownBlock2D""",
        """DownBlock2D""",
    )
    __lowerCAmelCase : str = False
    __lowerCAmelCase : str = (3_20, 6_40, 12_80, 12_80)
    __lowerCAmelCase : Tuple = 2
    __lowerCAmelCase : List[Any] = 8
    __lowerCAmelCase : Tuple = None
    __lowerCAmelCase : List[Any] = 12_80
    __lowerCAmelCase : Any = 0.0
    __lowerCAmelCase : List[str] = False
    __lowerCAmelCase : Tuple = jnp.floataa
    __lowerCAmelCase : List[str] = True
    __lowerCAmelCase : Union[str, Any] = 0
    __lowerCAmelCase : Optional[int] = """rgb"""
    __lowerCAmelCase : Optional[int] = (16, 32, 96, 2_56)

    def __lowerCamelCase ( self :Dict ,__lowercase :Tuple ):
        # Initialize parameters from dummy sample/timestep/encoder-state/
        # conditioning tensors.
        snake_case__ : str = (1, self.in_channels, self.sample_size, self.sample_size)
        snake_case__ : int = jnp.zeros(lowerCamelCase_ ,dtype=jnp.floataa )
        snake_case__ : int = jnp.ones((1,) ,dtype=jnp.intaa )
        snake_case__ : int = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
        # The conditioning image is 8x the latent resolution.
        snake_case__ : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        snake_case__ : List[str] = jnp.zeros(lowerCamelCase_ ,dtype=jnp.floataa )
        snake_case__ , snake_case__ : List[Any] = jax.random.split(lowerCamelCase_ )
        snake_case__ : Union[str, Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )["params"]

    def __lowerCamelCase ( self :Union[str, Any] ):
        # Build time embedding, conditioning embedder, down blocks, per-block
        # zero convs, and the mid block.
        snake_case__ : List[str] = self.block_out_channels
        snake_case__ : Tuple = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        snake_case__ : Tuple = self.num_attention_heads or self.attention_head_dim
        # input
        snake_case__ : str = nn.Conv(
            block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        # time
        snake_case__ : Tuple = FlaxTimesteps(
            block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
        snake_case__ : List[Any] = FlaxTimestepEmbedding(lowerCamelCase_ ,dtype=self.dtype )
        snake_case__ : str = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
        snake_case__ : Any = self.only_cross_attention
        if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
            snake_case__ : Any = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
            snake_case__ : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
        # down
        snake_case__ : Any = []
        snake_case__ : List[str] = []
        snake_case__ : Any = block_out_channels[0]
        # Zero conv applied to the raw conv_in output.
        snake_case__ : Union[str, Any] = nn.Conv(
            lowerCamelCase_ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
        controlnet_down_blocks.append(lowerCamelCase_ )
        for i, down_block_type in enumerate(self.down_block_types ):
            snake_case__ : Any = output_channel
            snake_case__ : str = block_out_channels[i]
            snake_case__ : Optional[int] = i == len(lowerCamelCase_ ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                snake_case__ : Dict = FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase_ ,out_channels=lowerCamelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
            else:
                snake_case__ : Union[str, Any] = FlaxDownBlockaD(
                    in_channels=lowerCamelCase_ ,out_channels=lowerCamelCase_ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
            down_blocks.append(lowerCamelCase_ )
            # One zero conv per resnet layer in the block.
            for _ in range(self.layers_per_block ):
                snake_case__ : Optional[Any] = nn.Conv(
                    lowerCamelCase_ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
                controlnet_down_blocks.append(lowerCamelCase_ )
            # Extra zero conv for the downsampler output of non-final blocks.
            if not is_final_block:
                snake_case__ : List[Any] = nn.Conv(
                    lowerCamelCase_ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
                controlnet_down_blocks.append(lowerCamelCase_ )
        snake_case__ : Any = down_blocks
        snake_case__ : Optional[Any] = controlnet_down_blocks
        # mid
        snake_case__ : Optional[Any] = block_out_channels[-1]
        snake_case__ : str = FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase_ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
        snake_case__ : int = nn.Conv(
            lowerCamelCase_ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)

    def __call__( self :Dict ,__lowercase :List[Any] ,__lowercase :List[str] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :List[str] = 1.0 ,__lowercase :Union[str, Any] = True ,__lowercase :Dict = False ,):
        # Forward pass: embed timestep and conditioning, run the encoder,
        # then apply the zero convs and scale by conditioning_scale.
        snake_case__ : str = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            snake_case__ : str = jnp.flip(lowerCamelCase_ ,axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase_ ,jnp.ndarray ):
            snake_case__ : Optional[int] = jnp.array([timesteps] ,dtype=jnp.intaa )
        elif isinstance(lowerCamelCase_ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
            snake_case__ : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
            snake_case__ : Tuple = jnp.expand_dims(lowerCamelCase_ ,0 )
        snake_case__ : Optional[Any] = self.time_proj(lowerCamelCase_ )
        snake_case__ : int = self.time_embedding(lowerCamelCase_ )
        # 2. pre-process
        snake_case__ : int = jnp.transpose(lowerCamelCase_ ,(0, 2, 3, 1) )
        snake_case__ : Tuple = self.conv_in(lowerCamelCase_ )
        snake_case__ : Any = jnp.transpose(lowerCamelCase_ ,(0, 2, 3, 1) )
        snake_case__ : Tuple = self.controlnet_cond_embedding(lowerCamelCase_ )
        sample += controlnet_cond
        # 3. down
        snake_case__ : Optional[Any] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
                snake_case__ , snake_case__ : Union[str, Any] = down_block(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,deterministic=not train )
            else:
                snake_case__ , snake_case__ : List[str] = down_block(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        snake_case__ : int = self.mid_block(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,deterministic=not train )
        # 5. contronet blocks
        snake_case__ : Dict = ()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase_ ,self.controlnet_down_blocks ):
            snake_case__ : Any = controlnet_block(lowerCamelCase_ )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        snake_case__ : Optional[int] = controlnet_down_block_res_samples
        snake_case__ : Dict = self.controlnet_mid_block(lowerCamelCase_ )
        # 6. scaling
        snake_case__ : str = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase_ ,mid_block_res_sample=lowerCamelCase_ )
| 354 |
def _lowerCAmelCase ( __lowerCAmelCase ) -> bool:
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
snake_case__ : str = sorted(string.lower() )
return len(__lowerCAmelCase ) == len(set(__lowerCAmelCase ) )
if __name__ == "__main__":
A__ = input('''Enter a string ''').strip()
A__ = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 44 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_lowerCAmelCase : Any = TypeVar("KEY")
_lowerCAmelCase : Union[str, Any] = TypeVar("VAL")
@dataclass(frozen=lowerCAmelCase , slots=lowerCAmelCase )
class _UpperCamelCase ( Generic[KEY, VAL] ):
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
class _UpperCamelCase ( _Item ):
def __init__( self :int ) -> None:
super().__init__(lowerCamelCase , lowerCamelCase )
def __bool__( self :int ) -> bool:
return False
_lowerCAmelCase : str = _DeletedItem()
class _UpperCamelCase ( MutableMapping[KEY, VAL] ):
def __init__( self :Dict , lowerCamelCase :int = 8 , lowerCamelCase :float = 0.75 ) -> None:
UpperCAmelCase__ = initial_block_size
UpperCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
UpperCAmelCase__ = capacity_factor
UpperCAmelCase__ = 0
def UpperCAmelCase_ ( self :Union[str, Any] , lowerCamelCase :KEY ) -> int:
return hash(lowerCamelCase ) % len(self._buckets )
def UpperCAmelCase_ ( self :Any , lowerCamelCase :int ) -> int:
return (ind + 1) % len(self._buckets )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :int , lowerCamelCase :KEY , lowerCamelCase :VAL ) -> bool:
UpperCAmelCase__ = self._buckets[ind]
if not stored:
UpperCAmelCase__ = _Item(lowerCamelCase , lowerCamelCase )
self._len += 1
return True
elif stored.key == key:
UpperCAmelCase__ = _Item(lowerCamelCase , lowerCamelCase )
return True
else:
return False
def UpperCAmelCase_ ( self :Dict ) -> bool:
UpperCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
UpperCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase_ ( self :str , lowerCamelCase :int ) -> None:
UpperCAmelCase__ = self._buckets
UpperCAmelCase__ = [None] * new_size
UpperCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase_ ( self :Tuple ) -> None:
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase_ ( self :Any ) -> None:
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :KEY ) -> Iterator[int]:
UpperCAmelCase__ = self._get_bucket_index(lowerCamelCase )
for _ in range(len(self._buckets ) ):
yield ind
UpperCAmelCase__ = self._get_next_ind(lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[Any] , lowerCamelCase :KEY , lowerCamelCase :VAL ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
if self._try_set(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
break
def __setitem__( self :Dict , lowerCamelCase :KEY , lowerCamelCase :VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase , lowerCamelCase )
def __delitem__( self :Optional[Any] , lowerCamelCase :KEY ) -> None:
for ind in self._iterate_buckets(lowerCamelCase ):
UpperCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase )
if item is _deleted:
continue
if item.key == key:
UpperCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[Any] , lowerCamelCase :KEY ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase ):
UpperCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase )
def __len__( self :str ) -> int:
return self._len
def __iter__( self :List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self :Tuple ) -> str:
UpperCAmelCase__ = " ,".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 169 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the logger previously shared the mangled name of the archive
# map below and was immediately clobbered; bind it separately.
logger = logging.get_logger(__name__)

# Map of pretrained Realm checkpoints to their hosted config files.
_lowerCAmelCase = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        # Fixed: URL path read "/aresolve/" which is not a valid hub route.
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/retriever).

    Restored from a mangled form: every parameter shared one name (a
    SyntaxError) and the base class name was undefined; the import at the top
    of the file provides ``PretrainedConfig``.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 169 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger previously shared the mangled name of the archive
# map below and was clobbered by it; bind it separately.
logger = logging.get_logger(__name__)

# Map of pretrained XLM-RoBERTa checkpoints to their hosted config files.
_lowerCAmelCase = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}
# NOTE(review): a second class below reuses this same mangled class name and
# shadows this one at module scope — the two should have distinct names.
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for XLM-RoBERTa models.

    Restored from a mangled form: every parameter shared one name (a
    SyntaxError) and the base class name was undefined; ``PretrainedConfig``
    is imported at the top of the file.
    """

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
# NOTE(review): this class reuses (and shadows) the config class's mangled
# name above — the two should be renamed apart.
class lowerCAmelCase__(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa.

    Restored: the axis mapping was bound to a throwaway local while the
    return referenced the undefined name ``dynamic_axis``, and the base class
    name was undefined (``OnnxConfig`` is imported at the top of the file).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 356 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored real constant names: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but the mangled source bound all
# four objects (including the logger) to one name, clobbering each in turn.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}

# Keep the last mangled binding alive in case unseen code still uses it.
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (strings of variable length). Restored:
    the mangled version bound the set and the previous char to throwaway
    locals, and its name no longer matched the ``get_pairs`` call sites in
    ``bpe``. Also generalized to accept an empty word.
    """
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase__(PreTrainedTokenizer):
    """PhoBERT tokenizer: BPE over pre-word-segmented text (RoBERTa-style specials).

    Restored from a mangled form: every method shared one name (shadowing each
    other), parameter lists had duplicate names (SyntaxError), assignment
    targets were collapsed into throwaway locals, and the base class name was
    undefined (``PreTrainedTokenizer`` is imported at the top of the file).
    Method names follow the ``PreTrainedTokenizer`` API so base-class dispatch
    and the in-view call sites (``self.bpe``, ``self.add_from_file``) resolve.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # The four special tokens occupy the first ids, matching fairseq.
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        # Each merge line is "<sym1> <sym2> <count>"; drop the count column.
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``<s> A </s>`` or ``<s> A </s></s> B </s>`` (RoBERTa format)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All-zero token type ids (PhoBERT does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one whitespace token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE by removing the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the vocab and merges files into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq-style "<token> <count>" dictionary file (path or handle)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 298 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Restored real constant names: the tokenizer class below references
# VOCAB_FILES_NAMES etc., but the mangled source bound all four objects
# (including the logger) to one name, clobbering each in turn.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1_024,
    "gpt2-medium": 1_024,
    "gpt2-large": 1_024,
    "gpt2-xl": 1_024,
    "distilgpt2": 1_024,
}

# Keep the last mangled binding alive in case unseen code still uses it.
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class _lowercase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) GPT-2 BPE tokenizer.

    Restored from a mangled form: the class inherited *itself* (a NameError —
    ``PreTrainedTokenizerFast`` is the imported base), the class attributes all
    shared one name, and ``__init__`` had duplicate parameter names
    (SyntaxError). Attribute/method names follow the
    ``PreTrainedTokenizerFast`` API so base-class dispatch resolves.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Re-configure the backend pre-tokenizer if its serialized
        # add_prefix_space flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate conversation turns, appending EOS after each turn and
        truncating from the left to ``model_max_length``."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 41 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: greatest common divisor of ``a`` and ``b``.

    Restored: the mangled version assigned ``a, a = b, a % b``, which never
    updated ``b`` and looped forever; its def name also no longer matched the
    ``euclidean_gcd`` call sites in ``main``.
    """
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm.

    Restored: the parameters shared one name (SyntaxError) and the recursive
    call passed the wrong first argument; the classic recurrence is
    ``gcd(a, b) = gcd(b, a mod b)``.
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Demo: print gcd results for a few sample pairs using both variants."""
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 94 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it exports.
# Restored: the mangled version bound this dict (and later the modeling list,
# and the final _LazyModule) to one name, so `_import_structure` was undefined
# at the _LazyModule call and the module was never replaced in sys.modules.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# Restored real constant names: the functions below reference SESSION_ID,
# HF_HUB_OFFLINE, DISABLE_TELEMETRY and logger, but the mangled source bound
# every object here to one name, clobbering each in turn.
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent=None) -> str:
    """Format a user-agent string carrying library/framework version info.

    ``user_agent`` may be None, a dict of extra key/value pairs, or a string
    to append verbatim. Restored: the base string was bound to a throwaway
    local instead of ``ua``, and the isinstance checks compared the argument
    against itself instead of ``dict``/``str``.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id, organization=None, token=None) -> str:
    """Return the hub repo id ``"<owner>/<model_id>"``.

    When ``organization`` is None the owner is resolved from the token via
    ``whoami``. Restored: the parameters shared one mangled name (SyntaxError)
    and the token/username were bound to throwaway locals.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """Render a model card from the template and save it as README.md.

    Restored: the mangled version had the duplicate keyword ``adam_betaa``
    twice in one call (SyntaxError) and lost the local bindings for the token,
    repo name, card object, and output path.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    # Only the main process (local_rank -1 or 0) writes the card.
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    """Extract a commit hash from a hub-cache ``snapshots/<hash>/...`` path.

    Returns ``commit_hash`` unchanged when given, or None when the path does
    not contain a snapshots segment or the captured value is not a valid hash.
    Restored: the search result and the captured group were bound to throwaway
    locals instead of the names the later statements read.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# Restored: `hf_cache_home` is referenced on the next line (and
# `old_diffusers_cache` in the migration code below), but both were bound to
# one mangled throwaway name.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None) -> None:
    """Move blob files from the old diffusers cache into the new layout.

    Each moved blob is replaced by a symlink at its old location so the old
    cache remains usable. Restored: the parameters shared one mangled name
    (SyntaxError) and the default-resolution rebindings were lost.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration, executed at import time.
# Restored: `cache_version_file`, `cache_version`, `old_cache_is_not_empty`
# and `trace` were all bound to one mangled name, so every later reference
# was undefined.
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> str:
if variant is not None:
lowercase : List[str] = weights_name.split(""".""" )
lowercase : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
lowercase : int = """.""".join(SCREAMING_SNAKE_CASE__ )
return weights_name
def _snake_case( SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ) -> Optional[Any]:
lowercase : Optional[int] = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
lowercase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
lowercase : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
lowercase : Any = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}' so that the correct variant file can be added." , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
lowercase : int = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 285 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger (the obfuscated file bound both constants to the same
# name `lowercase__`, so the logger was immediately overwritten).
logger = logging.get_logger(__name__)

# Map from checkpoint id to its hosted config.json; used for `from_pretrained`
# resolution of the ConvNeXt V2 configuration.
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt V2 model.

    The obfuscated original inherited twice from the undefined name
    `SCREAMING_SNAKE_CASE_` and declared twelve `__init__` parameters with the
    same name (a SyntaxError); the bases below are the two classes this module
    actually imports, and the parameter names are the attributes the body sets.
    """

    # Model-type key used by the HF auto classes/config registry
    # (presumably "convnextv2" — the obfuscated `_snake_case` attribute held it).
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults match the "tiny" variant layout.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Stage names drive backbone feature selection: "stem", "stage1", ...
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 328 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
# The property below references these names (`logger`,
# `_PANDAS_READ_CSV_*`); the obfuscated file bound all five constants to the
# same name `lowercase__`, leaving every reference undefined.
logger = datasets.utils.logging.get_logger(__name__)

# `pandas.read_csv` parameters that must not be forwarded when left at their
# "no default" sentinel value.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
# Parameters deprecated by pandas; only forwarded when explicitly set.
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
# Parameters introduced in pandas 1.3.0 / 2.0.0 (stripped for older pandas).
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV datasets.

    Most fields mirror `pandas.read_csv` keyword arguments and are forwarded
    through the `pd_read_csv_kwargs` property. The obfuscated original named
    every field `_snake_case` (each overwriting the previous) and both methods
    `A__`; the names below are reconstructed from the kwargs dict the property
    itself builds, and the class name is demanded by the in-file references
    (`getattr(CsvConfig(), ...)` and `BUILDER_CONFIG_CLASS = CsvConfig`).
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter` and `column_names` are user-facing aliases for the
        # pandas-native `sep` and `names`.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Keyword arguments to forward to `pandas.read_csv`, filtered for the
        installed pandas version."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based dataset builder for CSV files.

    The obfuscated original named all four methods `A__` (so only the last
    survived) and assigned every local to the throwaway `__UpperCamelCase`
    while later statements read the real names; `self._cast_table(...)` in
    `_generate_tables` grounds the method names restored below.
    """

    # Config class used by `load_dataset("csv", ...)`.
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        """Return dataset metadata built from the configured features."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Single split: everything goes into TRAIN.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the configured feature schema."""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ((file_idx, batch_idx), table) pairs read via pandas."""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 328 | 1 |
"""simple docstring"""
# First code cell injected into every generated notebook: installs the
# libraries the documentation examples rely on. (The obfuscated file bound
# all three constants below to `__A`, so the reference to INSTALL_CONTENT
# two statements down was undefined.)
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# Cells prepended to every converted notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder patterns that the doc formatter must resolve to dummy class
# names instead of leaving the template braces in place.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 254 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast checks for the DeepFloyd IF inpainting super-resolution pipeline.

    The obfuscated original inherited from the undefined name `a` (twice);
    the two mixins imported above are the real bases. All eight methods were
    named `snake_case` and all four class attributes `__magic_name__`, so
    only the last of each survived; names are restored per the
    PipelineTesterMixin contract.
    """

    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared tiny super-resolution components from IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic set of pipeline inputs for `device`."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loosened tolerance: fp16 round-tripping loses precision.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 254 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: module name -> public names it exports. The
# obfuscated file bound this to `lowerCAmelCase__` while the _LazyModule call
# below referenced `_import_structure` (a NameError), and discarded the
# _LazyModule instead of installing it in sys.modules.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Real import for static type checkers only; module/class names match the
    # strings declared in `_import_structure` above.
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
from collections.abc import Callable
import numpy as np
def lowerCamelCase__(ode_func, y0, x0, step_size, x_end):
    """Solve y' = ode_func(x, y) with the explicit (forward) Euler method.

    The obfuscated signature declared five parameters all named `_a`
    (a SyntaxError) while the body referenced the names restored here.

    Args:
        ode_func: Right-hand side f(x, y) of the ODE.
        y0: Initial value y(x0).
        x0: Initial abscissa.
        step_size: Step h; the number of steps is ceil((x_end - x0) / h).
        x_end: Final abscissa.

    Returns:
        numpy array of length n + 1 holding the successive y approximations.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k).
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
    # Run the module's doctests when this file is executed as a script.
    import doctest
    doctest.testmod()
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict loads from README.md and/or legacy dataset_infos.json.

    The obfuscated original declared two parameters both named `snake_case_`
    (a SyntaxError) and was one of five functions all named `__lowercase`;
    restored to a distinct `test_*` name so pytest collects it.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """Writing a DatasetInfo to disk and reloading it must round-trip."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """`_to_yaml_dict` keeps exactly the whitelisted keys and survives a YAML round-trip."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # YAML-serializable scalar/collection types only.
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """A default DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    """DatasetInfosDict must round-trip through its on-disk README.md form."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 291 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the TAPAS model family. The obfuscated file bound
# the dict and each backend-specific list to the same name `a_`, clobbering
# the structure, and never installed the _LazyModule in sys.modules.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch not installed: the modeling symbols are simply not exported.
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors `_import_structure`.
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 182 | import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the tests below; the code reads these as
# TOKENIZER_CHECKPOINTS / TINY_MODEL_CHECKPOINT, but the obfuscated file
# bound both to `__UpperCamelCase`, leaving the references undefined.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        """TF GPT-2 LM model paired with an in-graph tokenizer, so the whole
        text -> logits path can be exported as a SavedModel.

        Restored from the obfuscated `lowercase__`: the tests instantiate
        this class as `ModelToSave`.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """Tokenize `text` inside the graph and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            # NOTE(review): dtype was mangled to "intaa"; int32 assumed — confirm.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    """Checks that the in-graph TFGPTaTokenizer matches the Python GPT-2 tokenizer.

    The obfuscated original reused the class name `lowercase__` (shadowing
    ModelToSave's obfuscated name) and named all six methods `__A`, so only
    the last survived; method names restored (conf: best-effort).
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_tf_tokenizer(self):
        """The TF tokenizer must produce the same ids as the Python tokenizer."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # NOTE(review): cast dtype was mangled ("intaa"); int64 assumed — confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        """tf.function-compiled tokenization must match eager tokenization."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        """Round-trip through SavedModel serialization must preserve outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        """Rebuilding the tokenizer from its config must preserve outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        """`max_length` must be honored when padding is active."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            # NOTE(review): the mangled assignment target here was presumably
            # `pad_token_id` — confirm against upstream.
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 182 | 1 |
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """Logical NOR: return 1 only when both inputs are 0, else 0.

    Restored from the obfuscated `a__`: the truth-table printer below calls
    `nor_gate`, and the original signature declared two parameters with the
    same name while the body read `input_a` twice.
    """
    return int(input_1 == input_2 == 0)
def main() -> None:
    """Print the NOR-gate truth table.

    Restored from the obfuscated `a__`: the script guard below calls `main()`.
    """
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
    # Verify the doctests, then print the full truth table.
    import doctest
    doctest.testmod()
    main()
| 371 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite `<block>.<layer>.<original_name>` inside `key` to
    `block.<block - offset>.<layer>.<new_name>`.

    Restored from the obfuscated `a__` (four parameters all named
    `__SCREAMING_SNAKE_CASE`, a SyntaxError); the rename loop below calls
    this function by the name used here.

    Args:
        key: A state-dict key such as "poolformer.encoder.2.1.mlp.fc1.weight".
        offset: Number of patch-embedding layers to subtract from the block index.
        original_name: Dotted sub-path to replace, e.g. "mlp.fc1".
        new_name: Dotted replacement sub-path, e.g. "output.conv1".
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # Block and layer indices immediately precede `to_find` in the key.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def a__(state_dict):
    """Map original PoolFormer checkpoint keys to HuggingFace PoolFormer names."""
    new_state_dict = OrderedDict()
    # total_embed_found counts patch-embedding projections seen so far;
    # patch_emb_offset is the block-index shift they introduce.
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


# Descriptive alias used by the conversion entry point.
rename_keys = a__
def a__():
    """Fetch the standard COCO cats image used to sanity-check vision conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the HTTP response body.
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Descriptive alias used by the conversion entry point.
prepare_img = a__
@torch.no_grad()
def a__(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HuggingFace format.

    Builds the matching `PoolFormerConfig` from `model_name`, renames the
    checkpoint's state-dict keys, verifies the logits against known-good
    slices, then saves the model and image processor to
    `pytorch_dump_folder_path`.
    """
    logger = logging.get_logger(__name__)
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_0_0_0
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_0_0_0)

    # set config attributes (label maps from the shared label-files dataset)
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 1_2, 4]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 2_4, 8]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(F"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(F"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(F"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1E-2)

    # finally, save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Descriptive alias used by the CLI entry point.
convert_poolformer_checkpoint = a__
if __name__ == "__main__":
    # CLI entry point: convert one PoolFormer checkpoint to the HF format.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 108 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 200 ) -> int:
_a = [1, 2, 5, 10, 20, 50, 100, 200]
_a = [0] * (pence + 1)
_a = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowercase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
# Entry point: sanity-check the Project Euler #31 answer for 200 pence.
if __name__ == "__main__":
    assert solution(2_00) == 7_36_82
| 63 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Sentinel so `slow_tokenizer_class` can still be defined without sentencepiece.
    MBartTokenizer = None
# Module logger; `save_vocabulary` reports errors through it.
logger = logging.get_logger(__name__)
# Module-level constants referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 10_24,
    'facebook/mbart-large-cc25': 10_24,
}

# The 25 fairseq mBART language codes appended as special tokens.
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class lowerCAmelCase__(PreTrainedTokenizerFast):
    """
    "Fast" MBART tokenizer backed by HuggingFace's *tokenizers* library (BPE model).

    Sequences are built as ``<tokens> </s> <lang_code>`` for the source language
    and ``<lang_code> <tokens> </s>`` for the target language.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    # Language-mode special tokens, reset by set_{src,tgt}_lang_special_tokens.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model we cannot serialize a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        # Changing the source language re-installs the matching special tokens.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Add the language-mode prefix/suffix special tokens around the sequence."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """mBART does not use token type ids; return an all-zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs plus the forced BOS language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Source-language mode: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Target-language mode: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model next to the fast tokenizer files."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 11 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def __A ( a_ :Callable[[float], float] , a_ :float , a_ :float) -> float:
__a : float = xa
__a : float = xa
while True:
if x_n == x_na or function(a_) == function(a_):
raise ZeroDivisionError('''float division by zero, could not find root''')
__a : float = x_na - (
function(a_) / ((function(a_) - function(a_)) / (x_na - x_n))
)
if abs(x_na - x_na) < 10**-5:
return x_na
__a : int = x_na
__a : Dict = x_na
def __A ( a_ :float) -> float:
return math.pow(a_ , 3) - (2 * x) - 5
if __name__ == "__main__":
    # Demo: root of f(x) = x**3 - 2x - 5, bracketed between 3 and 3.5.
    print(intersection(f, 3, 3.5))
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
# Module logger for the metric (the original bound it to a name that the very
# next assignment clobbered).
logger = datasets.logging.get_logger(__name__)
A = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
A = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
A = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase(datasets.Metric):
    """COMET machine-translation quality metric (wraps `unbabel-comet` models)."""

    def _info(self):
        # Declare the input schema and provenance metadata for the metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='''https://unbabel.github.io/COMET/html/index.html''',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''', id='''sequence'''),
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/Unbabel/COMET'''],
            reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Load the checkpoint selected by config_name ("default" -> wmt20-comet-da).
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        # Re-shape the three parallel lists into COMET's list-of-dicts format.
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase__(_a):
    """pytest hook: register the repo's shared CLI options on the parser `_a`."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_a)


# pytest discovers hooks strictly by name.
pytest_addoption = lowerCamelCase__
def lowerCamelCase__(_a):
    """pytest hook: optionally emit the detailed report at the end of the session.

    `_a` is pytest's terminalreporter plugin object; the report id comes from the
    `--make-reports` CLI option.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = _a.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(_a, id=make_reports)


# pytest discovers hooks strictly by name.
pytest_terminal_summary = lowerCamelCase__
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCamelCase__(Pipeline):
    """Video classification pipeline: samples frames from a video (path or URL)
    and classifies them with a video-classification model."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # decord is required to decode the video frames.
        requires_backends(self, 'decord')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        # Route frame options to preprocess() and top_k to postprocess().
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        """Classify one video (or a batch); see the base `Pipeline.__call__`."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('http://') or video.startswith('https://'):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        # Sample `num_frames` evenly spaced frames at the requested stride.
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.intaa)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''')

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(scores, ids)]
| 221 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
A = sum(lowercase__ ) / len(lowercase__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase__ )
# Run the module doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 57 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __UpperCamelCase(unittest.TestCase):
    """Round-trip and behavior tests for `BlipaProcessor` (image processor + GPT-2 tokenizer)."""

    def setUp(self):
        # A serialized processor on disk backs get_tokenizer()/get_image_processor().
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with one random PIL image (channels moved to last axis)."""
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 57 | 1 |
'''simple docstring'''
from math import ceil
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
snake_case__ : int = list(range(0 , _lowerCAmelCase ) )
snake_case__ : str = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
snake_case__ : str = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCAmelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCAmelCase )
# Missing blocks
snake_case__ : Union[str, Any] = [i for i in blocks if i not in device_map_blocks]
snake_case__ : Dict = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCAmelCase ) )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : str = list(range(_lowerCAmelCase ) )
snake_case__ : Dict = int(ceil(n_layers / len(_lowerCAmelCase ) ) )
snake_case__ : Optional[int] = [layers[i : i + n_blocks] for i in range(0 , _lowerCAmelCase , _lowerCAmelCase )]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
| 35 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
snake_case__ : Optional[Any] = np.max(_lowerCAmelCase , axis=-1 , keepdims=_lowerCAmelCase )
snake_case__ : List[str] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCAmelCase )
class UpperCAmelCase_(Pipeline):
    """Text-pair classification pipeline: classifies `text` (optionally with
    `second_text` as the pair) and returns the best label, its score, and raw logits."""

    def _sanitize_parameters(self, **kwargs):
        # Route the optional `second_text` argument to preprocess().
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.idalabel[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a_ ( lowerCamelCase , unittest.TestCase ):
    """Test suite for the slow and fast Barthez tokenizers (``moussaKam/mbarthez``).

    NOTE(review): local variable names in this file appear machine-scrambled —
    values are bound to ``UpperCamelCase`` while later lines read the original
    names (e.g. ``tokenizer`` in ``setUp``, ``vocab_keys``, ``batch``), so
    several statements raise NameError as written.
    """
    lowercase = BarthezTokenizer
    lowercase = BarthezTokenizerFast
    lowercase = True
    lowercase = True
    def A__ ( self ) -> Tuple:
        """Download the pretrained fast tokenizer and save it (current and legacy
        formats) into the temporary directory used by the tester mixin."""
        super().setUp()
        UpperCamelCase = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = tokenizer
    def A__ ( self ) -> Tuple:
        """Check token<->id conversion for the ``<pad>`` special token (id 1)."""
        UpperCamelCase = """<pad>"""
        UpperCamelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> Dict:
        """Check the first/last vocabulary entries and the total size (101122)."""
        UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 101122 )
    def A__ ( self ) -> Tuple:
        """Check the ``vocab_size`` reported by the tokenizer."""
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
    @require_torch
    def A__ ( self ) -> Tuple:
        """Tokenize a two-sentence batch to PyTorch tensors and check shapes and ids."""
        UpperCamelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        UpperCamelCase = [0, 57, 3018, 70307, 91, 2]
        UpperCamelCase = self.tokenizer(
            _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
        self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def A__ ( self ) -> str:
        """Check that the slow and rust tokenizers agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = self.get_rust_tokenizer()
        UpperCamelCase = """I was born in 92000, and this is falsé."""
        UpperCamelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
        UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.get_rust_tokenizer()
        UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE )
        UpperCamelCase = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    @slow
    def A__ ( self ) -> Optional[Any]:
        """Full integration test against a pinned model revision, using French texts."""
        UpperCamelCase = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        UpperCamelCase = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_SCREAMING_SNAKE_CASE , )
| 183 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowercase__ ( )-> Tuple:
# Get the sagemaker specific mp parameters from smp_options variable.
UpperCamelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCamelCase = json.loads(__UpperCamelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCamelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCamelCase = json.loads(__UpperCamelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , __UpperCamelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
# Initialise SageMaker model parallelism once at import time, if available.
# NOTE(review): `is_sagemaker_model_parallel_available` is not defined in this
# file — the availability helper above is bound to `lowercase__`; the names
# appear machine-scrambled, so this raises NameError as written.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp
    smp.init()
@dataclass
class a_ ( lowerCamelCase ):
    """Deprecated ``TrainingArguments`` variant for SageMaker runs.

    NOTE(review): several names read here (``logger``, ``device``,
    ``_SCREAMING_SNAKE_CASE``) are not defined in this file — module-level
    bindings were scrambled (the logger is bound to ``SCREAMING_SNAKE_CASE__``)
    and locals are bound to ``UpperCamelCase`` but read via their original
    names, so parts of this class raise NameError as written.
    """
    # Model-parallel arguments forwarded by the SageMaker launcher.
    lowercase = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def A__ ( self ) -> Tuple:
        """Run the parent post-init and emit a deprecation warning."""
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )
    @cached_property
    def A__ ( self ) -> "torch.device":
        """Select the torch device, initialising SMP / SMDDP / DDP as appropriate."""
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            UpperCamelCase = torch.device("""cpu""" )
            UpperCamelCase = 0
        elif is_sagemaker_model_parallel_available():
            UpperCamelCase = smp.local_rank()
            UpperCamelCase = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
            UpperCamelCase = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            UpperCamelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            UpperCamelCase = torch.device("""cuda""" , self.local_rank )
            UpperCamelCase = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            UpperCamelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            UpperCamelCase = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            UpperCamelCase = torch.device("""cuda""" , self.local_rank )
            UpperCamelCase = 1
        if device.type == "cuda":
            torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
        return device
    @property
    def A__ ( self ) -> Tuple:
        """Return ``smp.dp_size()`` under model parallelism, else the parent's value."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def A__ ( self ) -> Optional[Any]:
        """True unless SageMaker model parallelism is available."""
        return not is_sagemaker_model_parallel_available()
    @property
    def A__ ( self ) -> str:
        """Always returns False."""
        return False
| 183 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def lowercase ( __snake_case : Tuple , __snake_case : Union[str, Any] ):
lowercase_ : str = a.name
lowercase_ : List[str] = b.name
lowercase_ : List[str] = ''''''
lowercase_ : int = ''''''
lowercase_ : Any = a == b
lowercase_ : Dict = name_a
lowercase_ : Union[str, Any] = name_b
return res
def lowercase ( __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__snake_case , __snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , __snake_case , __snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
for n in graph_proto.node:
_node_replace_input_with(__snake_case , __snake_case , __snake_case )
def lowercase ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
lowercase_ : Any = list(model.graph.initializer )
lowercase_ : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase_ : str = inits[i].name
lowercase_ : List[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __snake_case , __snake_case )
def lowercase ( __snake_case : Union[str, Any] ):
    """Load an ONNX model, drop duplicated initializers, and save the result.

    Scans all pairs of graph initializers, collects duplicates (comparing the
    tensors while ignoring names), rewires consumers of each duplicate to the
    kept initializer, and writes ``optimized_<name>`` next to the input file.
    Returns the path of the optimized model.

    Fixed: every local in the scrambled source was bound to ``lowercase_``
    while later lines read ``inits``/``dup_set``/``ind_to_replace``/...,
    which raised NameError.
    """
    model_file_folder = os.path.dirname(__snake_case )
    model_file_name = os.path.basename(__snake_case )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # Bytes per element by TensorProto data type:
                # 1 = FLOAT and 6 = INT32 take 4 bytes; 7 = INT64 and 11 = DOUBLE take 8.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                # Replace initializer j with the kept initializer i later on.
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model_name = '''optimized_''' + model_file_name
    new_model_path = os.path.join(model_file_folder , new_model_name )
    onnx.save(model , new_model_path )
    return new_model_path
| 33 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ : Optional[int] = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hosted config.json.
a_ : Dict = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class a ( _SCREAMING_SNAKE_CASE ):
    """Configuration for Marian encoder-decoder models.

    Fixed: the scrambled source declared every ``__init__`` parameter with the
    same name ``__magic_name__`` (a SyntaxError), assigned each value to a
    throwaway local instead of ``self``, and collapsed the three class
    attributes onto one name. Parameter names/order below are reconstructed
    from the defaults already present in the source and the attribute reads in
    the body; defaults are unchanged.
    """

    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , vocab_size=5_81_01 , decoder_vocab_size=None , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=5_81_00 , scale_embedding=False , pad_token_id=5_81_00 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        # A decoder vocab of None means the decoder shares the encoder vocab.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class a ( _SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for Marian: input/output axis specs and dummy
    input generation for the default, seq2seq-lm and causal-lm tasks.

    NOTE(review): locals in this file appear machine-scrambled — every result
    is bound to ``_a`` while later lines read ``common_inputs``/
    ``common_outputs``/``batch``/... (NameError), and several methods declare
    multiple parameters with the same name ``__magic_name__`` (SyntaxError).
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model inputs, depending on task and use_past."""
        if self.task in ["default", "seq2seq-lm"]:
            _a = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _a = {0: 'batch'}
                _a = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                _a = {0: 'batch', 1: 'decoder_sequence'}
                _a = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(__magic_name__ , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _a = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _a , _a = self.num_layers
                for i in range(__magic_name__ ):
                    _a = {0: 'batch', 2: 'past_sequence + sequence'}
                    _a = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            _a = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model outputs; adds present-state axes when use_past."""
        if self.task in ["default", "seq2seq-lm"]:
            _a = super().outputs
        else:
            _a = super(__magic_name__ , self ).outputs
            if self.use_past:
                _a , _a = self.num_layers
                for i in range(__magic_name__ ):
                    _a = {0: 'batch', 2: 'past_sequence + sequence'}
                    _a = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (plus past_key_values when use_past)."""
        _a = self._generate_dummy_inputs_for_encoder_and_decoder(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Generate decoder inputs
        _a = seq_length if not self.use_past else 1
        _a = self._generate_dummy_inputs_for_encoder_and_decoder(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        _a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        _a = dict(**__magic_name__ , **__magic_name__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _a , _a = common_inputs['input_ids'].shape
            _a = common_inputs['decoder_input_ids'].shape[1]
            _a , _a = self.num_attention_heads
            _a = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _a = decoder_seq_length + 3
            _a = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            _a = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
            _a = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _a , _a = self.num_layers
            _a = min(__magic_name__ , __magic_name__ )
            _a = max(__magic_name__ , __magic_name__ ) - min_num_layers
            _a = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(__magic_name__ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__magic_name__ ),
                        torch.zeros(__magic_name__ ),
                        torch.zeros(__magic_name__ ),
                        torch.zeros(__magic_name__ ),
                    ) )
            # TODO: test this.
            _a = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(__magic_name__ , __magic_name__ ):
                common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
        return common_inputs
    def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
        """Build dummy causal-LM inputs (plus past_key_values when use_past)."""
        _a = self._generate_dummy_inputs_for_encoder_and_decoder(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _a , _a = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            _a = seqlen + 2
            _a , _a = self.num_layers
            _a , _a = self.num_attention_heads
            _a = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _a = common_inputs['attention_mask'].dtype
            _a = torch.cat(
                [common_inputs['attention_mask'], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
            _a = [
                (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
            ]
        return common_inputs
    def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
        """Tokenize a fixed-size dummy batch, avoiding ONNX constant-folding tricks."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _a = compute_effective_axis_dimension(
            __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _a = tokenizer.num_special_tokens_to_add(__magic_name__ )
        _a = compute_effective_axis_dimension(
            __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
        # Generate dummy inputs according to compute batch and sequence
        _a = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _a = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
        return common_inputs
    def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation by configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        else:
            _a = self._generate_dummy_inputs_for_causal_lm(
                __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        return common_inputs
    def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
        """Flatten past_key_values using the seq2seq or causal-lm layout by task."""
        if self.task in ["default", "seq2seq-lm"]:
            _a = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        else:
            _a = super(__magic_name__ , self )._flatten_past_key_values_(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
    @property
    def __UpperCAmelCase ( self ) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 168 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Cache locations and canonical weight/config file names used across diffusers.
_a : str= HUGGINGFACE_HUB_CACHE
_a : List[str]= "config.json"
# Per-backend model weight file names.
_a : int= "diffusion_pytorch_model.bin"
_a : Optional[Any]= "diffusion_flax_model.msgpack"
_a : List[str]= "model.onnx"
_a : Optional[int]= "diffusion_pytorch_model.safetensors"
_a : int= "weights.pb"
_a : Union[str, Any]= "https://huggingface.co"
_a : int= default_cache_path
# Dynamic-module cache for remotely loaded code.
_a : str= "diffusers_modules"
_a : Union[str, Any]= os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
# Checkpoint variant suffixes that may be deprecated/filtered.
_a : Optional[Any]= ["fp16", "non-ema"]
_a : Dict= ".self_attn"
| 95 | """simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def __UpperCAmelCase ( UpperCAmelCase_ : Iterable[str] , UpperCAmelCase_ : int ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
__snake_case : Optional[int] = iter(UpperCAmelCase_ )
while True:
__snake_case : Optional[int] = tuple(itertools.islice(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not chunk:
return
yield chunk
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
__snake_case : Any = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
__snake_case : Union[str, Any] = ''
if len(UpperCAmelCase_ ) < 2:
return dirty
for i in range(len(UpperCAmelCase_ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCAmelCase_ ) & 1:
clean += "X"
return clean
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> list[str]:
'''simple docstring'''
__snake_case : List[str] = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__snake_case : Optional[int] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCAmelCase_ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCAmelCase_ )
return table
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
__snake_case : str = generate_table(UpperCAmelCase_ )
__snake_case : Union[str, Any] = prepare_input(UpperCAmelCase_ )
__snake_case : Tuple = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
__snake_case , __snake_case : Any = divmod(table.index(UpperCAmelCase_ ) , 5 )
__snake_case , __snake_case : Tuple = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
__snake_case : Optional[int] = generate_table(UpperCAmelCase_ )
__snake_case : Any = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
__snake_case , __snake_case : Union[str, Any] = divmod(table.index(UpperCAmelCase_ ) , 5 )
__snake_case , __snake_case : Tuple = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 95 | 1 |
# Undirected demo graph as an adjacency list, used by the __main__ examples below.
__A : Union[str, Any] = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def __UpperCamelCase ( _A : dict , _A : Tuple , _A : Any ) ->list[str]:
"""simple docstring"""
lowerCamelCase_ =set()
# keep track of all the paths to be checked
lowerCamelCase_ =[[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
lowerCamelCase_ =queue.pop(0 )
# get the last node from the path
lowerCamelCase_ =path[-1]
if node not in explored:
lowerCamelCase_ =graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
lowerCamelCase_ =list(_A )
new_path.append(_A )
queue.append(_A )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(_A )
# in case there's no path between the 2 nodes
return []
def __UpperCamelCase ( _A : dict , _A : Any , _A : Tuple ) ->int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
lowerCamelCase_ =[start]
lowerCamelCase_ =set(_A )
# Keep tab on distances from `start` node.
lowerCamelCase_ ={start: 0, target: -1}
while queue:
lowerCamelCase_ =queue.pop(0 )
if node == target:
lowerCamelCase_ =(
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(_A )
queue.append(_A )
lowerCamelCase_ =dist[node] + 1
return dist[target]
if __name__ == "__main__":
    # NOTE(review): `bfs_shortest_path`, `bfs_shortest_path_distance` and
    # `demo_graph` are not defined under these names in this file — the
    # functions above are bound to `__UpperCamelCase` and the graph to `__A`;
    # the names appear machine-scrambled, so this raises NameError as written.
    print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 154 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 64 | """simple docstring"""
from typing import Any
import numpy as np
def lowercase_ ( _lowerCamelCase: np.ndarray ) -> bool:
    """Return True when the matrix equals its conjugate transpose (is Hermitian).

    Fixed: the body compared against ``matrix``, an undefined name — the
    parameter had been scrambled to ``_lowerCamelCase``.
    """
    return np.array_equal(_lowerCamelCase , _lowerCamelCase.conjugate().T )
def lowercase_ ( _lowerCamelCase: np.ndarray , _lowerCamelCase: np.ndarray ) -> Any:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = v.conjugate().T
__lowerCamelCase : Any = v_star.dot(_lowerCamelCase )
assert isinstance(_lowerCamelCase , np.ndarray )
return (v_star_dot.dot(_lowerCamelCase )) / (v_star.dot(_lowerCamelCase ))
def lowercase_ ( ) -> None:
    """Self-test for the Hermitian check and Rayleigh-quotient helpers.

    NOTE(review): as written this raises NameError — it calls
    ``is_hermitian``/``rayleigh_quotient`` (the helpers above are bound to
    ``lowercase_``) and passes ``_lowerCamelCase``/formats ``a``, none of
    which are defined here; the names appear machine-scrambled.
    """
    __lowerCamelCase : List[str] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    __lowerCamelCase : int = np.array([[1], [2], [3]] )
    assert is_hermitian(_lowerCamelCase ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) )
    __lowerCamelCase : Dict = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(_lowerCamelCase ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(_lowerCamelCase , _lowerCamelCase ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `tests` is not defined in this file — the self-test helper
    # above is bound to `lowercase_`; the call below reflects the intended name.
    tests()
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A_ (lowercase__ ):
    """Map-style dataset wrapper that applies a processing callable to each
    item on access.

    Fixed: ``__init__`` declared three parameters with the same name (a
    SyntaxError) and never stored them on ``self``, so ``__len__`` and
    ``__getitem__`` could not work.
    """

    def __init__( self , dataset , process , params ):
        """Store the wrapped dataset, the per-item ``process`` callable and the
        keyword parameters forwarded to it."""
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__( self ):
        """Length of the wrapped dataset."""
        return len(self.dataset )

    def __getitem__( self , i ):
        """Fetch item ``i`` and run it through ``process`` with the stored kwargs."""
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator(IterableDataset):
    """Iterator that runs `infer(item, **params)` on every item of `loader` and,
    when `loader_batch_size` is set, unrolls batched outputs so consumers always
    see batch_size=1 items."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Args:
            loader: iterable of raw items (typically a torch `DataLoader`).
            infer: callable applied per item as `infer(item, **params)`.
            params: extra keyword arguments forwarded to `infer`.
            loader_batch_size: if set, `infer` outputs are batches of this size
                and are unrolled before being yielded.
        """
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping for the batch currently being unrolled.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of the stored batch as a batch_size=1 item."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers.
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class so it looks like
            # a batch_size=1 output of the same type.
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch: return the next item within it.
            return self.loader_batch_item()

        # We're out of items within a batch.
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # Could be the last batch, so we can't unroll as many elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    """Flattens one-to-many preprocessing: each input item produces a sub-iterator
    whose elements are yielded one at a time (lists of lists -> flat stream)."""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator is None at the very start: create it from the first item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return the next element of the current sub-iterator.
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, ChunkIterator keeps feeding: the
            # next item of `iterator` creates a fresh sub-iterator. This is
            # effectively flattening lists of lists into a single stream, but
            # with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """Inverse of PipelineChunkIterator: regroups flattened items back into lists.

    Accumulates items (possibly while unbatching) until one carries `is_last`,
    then yields the accumulated list, so that `process` and `postprocess` see
    the same per-input grouping the chunking originally had.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism, BUT
        # every item here must contain an `is_last` key marking group boundaries.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # Finish unrolling the batch we were in the middle of.
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                # Try to infer the size of the batch (tensor or dict of tensors/lists).
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # Could be the last batch, so we can't unroll as many elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """Dataset view that extracts a single key from each dict-like item."""

    def __init__(self, dataset, key):
        """
        Args:
            dataset: underlying map-style dataset of dict-like items.
            key: the key to extract from every item.
        """
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    """Dataset view that pairs two keys of each item into `text` / `text_pair`."""

    def __init__(self, dataset, key1, key2):
        """
        Args:
            dataset: underlying map-style dataset of dict-like items.
            key1: key mapped to the `text` field of each output.
            key2: key mapped to the `text_pair` field of each output.
        """
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A_:
    """Placeholder test suite: only verifies the `require_onnxruntime` gate applies."""

    pass
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation (Dance Diffusion)."""

    def __init__(self, unet, scheduler):
        """Register the denoising `unet` and noise `scheduler` modules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Generate `batch_size` audio clips of roughly `audio_length_in_s` seconds.

        Returns an `AudioPipelineOutput` (or a plain tuple when `return_dict=False`).
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round the sample count up to a multiple of the UNet's downscale factor.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added to satisfy the downscale factor.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM sampling."""

    def __init__(self, unet, scheduler):
        """Register modules, coercing any compatible scheduler to `DDIMScheduler`."""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate `batch_size` images; returns `ImagePipelineOutput` or a tuple."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A__ : Any = datasets.logging.get_logger(__name__)
A__ : Optional[Any] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
A__ : Optional[Any] = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
A__ : str = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Extract coval coreference info for one (key, system) document pair.

    Returns a dict `{doc: (key_clusters, sys_clusters, key_mention_sys_cluster,
    sys_mention_key_cluster)}` consumable by `evaluator.evaluate_documents`.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score `sys_lines` against `key_lines` with each `(name, metric)` in `metrics`.

    Returns a dict of `{name}/recall`, `{name}/precision`, `{name}/f1` entries,
    plus `conll_score` when the three CoNLL sub-metrics (muc, bcub, ceafe) ran.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        # CoNLL score is the average F1 of MUC, B-cubed and CEAFe, as a percentage.
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True when the CoNLL key lines carry gold parse info in column 6.

    Only the first non-comment line with more than 6 columns is decisive; a
    parse column of "-" means no gold parse on that line.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                # A short (malformed) line ends the scan.
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __snake_case(datasets.Metric):
    """CoVal coreference metric (MUC, B-cubed, CEAFe, LEA, CoNLL score)."""

    def _info(self):
        # `datasets.Metric` requires this exact method name.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Score CoNLL-format `predictions` against `references` with all sub-metrics."""
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
import math
def is_prime(number: int) -> bool:
    """Return True when `number` is prime, using 6k±1 trial division.

    Fixes: restores the name `is_prime`, which the sibling `solution`
    function calls.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k ± 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number (Project Euler problem 7).

    Raises:
        TypeError: when `nth` is not castable to int.
        ValueError: when `nth` is not positive.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        # `num` advances whether or not it was prime (the original duplicated
        # this increment in both branches).
        num += 1
    return primes[-1]
if __name__ == "__main__":
    print(f'{solution() = }')
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __A ( UpperCAmelCase : List[str] ):
raise NotImplementedError()
@abstractmethod
def __A ( self : Tuple ):
raise NotImplementedError() | 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs and runs shape checks for each TF head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # NOTE(review): the constructor arguments are deliberately ignored and
        # the values hard-coded below, mirroring the original visible behavior.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a small random batch plus a matching `ConvBertConfig`."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both the dict and the list calling conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Duplicate each input along a new "choice" axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
    """Common + pipeline test suite for the TF ConvBERT model family.

    NOTE(review): this file is machine-obfuscated. All class attributes share
    the name ``_lowerCamelCase`` (only the last assignment survives), every
    method is named ``__A`` (only the last definition survives and none are
    discovered by unittest, which requires ``test_``-prefixed names), and
    method bodies assign to the throwaway name ``A_`` while reading back
    never-defined names such as ``UpperCAmelCase`` — the suite cannot run as
    written. Comments below describe the evident intent of each member.
    """

    # Intended: all_model_classes — the model classes exercised by the common tests.
    _lowerCamelCase : Union[str, Any] = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Intended: pipeline_model_mapping — pipeline task name -> model class.
    _lowerCamelCase : Any = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Intended: three boolean feature flags (e.g. test_pruning / test_head_masking /
    # test_onnx) — presumably, TODO confirm against upstream; all collapse to one name.
    _lowerCamelCase : Dict = False
    _lowerCamelCase : Optional[int] = False
    _lowerCamelCase : Dict = False

    # setUp: build the shared model tester and the generic config tester.
    def __A ( self : List[str] ):
        A_ = TFConvBertModelTester(self )
        A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    # Run the generic configuration checks.
    def __A ( self : Tuple ):
        self.config_tester.run_common_tests()
    # Shape-check the base model.
    def __A ( self : Tuple ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    # Shape-check the masked-LM head.
    def __A ( self : Dict ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    # Shape-check the multiple-choice head.
    def __A ( self : List[Any] ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
    # Shape-check the question-answering head.
    def __A ( self : Dict ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
    # Shape-check the sequence-classification head.
    def __A ( self : int ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
    # Shape-check the token-classification head.
    def __A ( self : List[Any] ):
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    # Save each model in SavedModel format, reload it with Keras, and verify
    # that hidden states / attentions survive the round-trip with the
    # expected counts and shapes.
    @slow
    def __A ( self : str ):
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        A_ = True
        if hasattr(UpperCAmelCase , "use_cache" ):
            A_ = True
        A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        for model_class in self.all_model_classes:
            A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            A_ = model_class(UpperCAmelCase )
            A_ = len(model(UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
                # SavedModel format is written under <dir>/saved_model/1.
                A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
                A_ = tf.keras.models.load_model(UpperCAmelCase )
                A_ = model(UpperCAmelCase )
                if self.is_encoder_decoder:
                    A_ = outputs["encoder_hidden_states"]
                    A_ = outputs["encoder_attentions"]
                else:
                    A_ = outputs["hidden_states"]
                    A_ = outputs["attentions"]
                self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
                # num_hidden_layers + 1: hidden states include the embeddings.
                A_ = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                # num_attention_heads / 2: ConvBERT's head ratio halves the
                # number of self-attention heads.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    # Smoke-test loading the reference checkpoint.
    @slow
    def __A ( self : List[str] ):
        A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(UpperCAmelCase )
    # Verify attention outputs: returned both via keyword arguments and via
    # the config flags, and always appearing last in the output tuple.
    def __A ( self : Any ):
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
        # Decoder-side attention shape check (encoder-decoder models only).
        def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
            A_ = len(UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            A_ = outputs.decoder_attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        # Encoder-side (or encoder-only) attention shape check.
        def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
            A_ = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            A_ = True
            A_ = False
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            A_ = len(UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            if self.is_encoder_decoder:
                A_ = model_class(UpperCAmelCase )
                A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , UpperCAmelCase )
                check_decoder_attentions_output(UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            A_ = True
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
            # Check attention is always last and order is fine
            A_ = True
            A_ = True
            A_ = model_class(UpperCAmelCase )
            A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
            check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test for the pretrained ConvBERT base checkpoint.

    Restored from the obfuscated original: the class was named ``_a`` (which
    shadowed the main test class of the same name), the test method was named
    ``__A`` (never discovered by unittest, which requires ``test_``-prefixed
    names), and the body assigned results to the throwaway name ``A_`` while
    reading back the never-defined name ``UpperCAmelCase``.
    """

    @slow
    def test_inference_no_head(self):
        """Run a tiny input through the checkpoint and compare a slice of the
        last hidden state against stored reference values."""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # Hidden size of conv-bert-base is 768; one vector per input token.
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference values for the first 3x3 slice of the last hidden state.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level constants below share the obfuscated name
# ``__lowerCAmelCase``, so the checkpoint map overwrites the logger binding.
__lowerCAmelCase = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config.json URL.
__lowerCAmelCase = {
    """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __a ( PretrainedConfig ):
    """Configuration class for the CvT (Convolutional vision Transformer) model.

    Reconstructed from the obfuscated original, which could not even be
    parsed: every ``__init__`` parameter shared the name ``lowerCAmelCase__``
    (a duplicate-argument ``SyntaxError``) and every attribute assignment
    went to a local ``lowercase__`` instead of ``self``. Parameter names
    follow the assignment order of the original body; defaults are preserved
    byte-for-byte. The base class is restored to ``PretrainedConfig`` (the
    only config class imported at the top of this file); the obfuscated base
    ``__UpperCAmelCase`` was never defined.
    """

    # Restored from the obfuscated class attribute that held ``'cvt'``;
    # PretrainedConfig subclasses identify themselves via ``model_type``.
    model_type = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        # NOTE: the list defaults are shared across instances (standard HF
        # config style); they are only read, never mutated.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
    """Config tester that additionally checks SegFormer-specific attributes.

    Restored from the obfuscated original: the class was named ``A__`` (and
    immediately shadowed by the next ``A__`` class), its base
    ``__UpperCAmelCase`` was never defined (``ConfigTester`` is imported at
    the top of this file), and the body assigned the config to a throwaway
    name while asserting on the undefined name ``lowercase``. The class name
    matches the ``SegformerConfigTester(...)`` call in the model test class
    below; the method name follows the ``ConfigTester`` hook convention —
    TODO confirm against upstream.
    """

    def create_and_check_config_common_properties(self) -> None:
        """Build a config from ``inputs_dict`` and check it exposes the
        SegFormer-specific fields."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config , 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config , 'num_encoder_blocks'))
class A__ :
    """Builds tiny SegFormer configs/inputs and shape-checks each head.

    NOTE(review): machine-obfuscated. The ``__init__`` signature repeats the
    parameter name ``lowercase`` (a duplicate-argument ``SyntaxError``),
    every method is named ``__lowercase`` (only the last survives), and
    bodies assign to the throwaway name ``a__`` while reading back
    never-defined names (``parent``, ``result``, ``config`` ...) — the class
    cannot run as written. Comments describe the evident intent.
    """
    # Intended: store the tiny-model hyper-parameters on self.
    def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 128] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ) -> Tuple:
        '''Record batch size, image size, per-stage depths/ratios, etc.'''
        a__ : Optional[Any] = parent
        a__ : int = batch_size
        a__ : Tuple = image_size
        a__ : Union[str, Any] = num_channels
        a__ : str = num_encoder_blocks
        a__ : Dict = sr_ratios
        a__ : Dict = depths
        a__ : Union[str, Any] = hidden_sizes
        a__ : str = downsampling_rates
        a__ : Tuple = num_attention_heads
        a__ : Optional[Any] = is_training
        a__ : Union[str, Any] = use_labels
        a__ : Any = hidden_act
        a__ : Optional[int] = hidden_dropout_prob
        a__ : int = attention_probs_dropout_prob
        a__ : Optional[Any] = initializer_range
        a__ : Tuple = num_labels
        a__ : Union[str, Any] = scope
    # Intended: prepare_config_and_inputs — random pixel values plus optional
    # per-pixel labels.
    def __lowercase ( self) -> Any:
        '''Return ``(config, pixel_values, labels)`` for the tiny model.'''
        a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a__ : str = None
        if self.use_labels:
            a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        a__ : Any = self.get_config()
        return config, pixel_values, labels
    # Intended: get_config — build a SegformerConfig from the stored params.
    def __lowercase ( self) -> Any:
        '''Build the tiny ``SegformerConfig``.'''
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    # Intended: create_and_check_model — base model output shape; the spatial
    # size shrinks by the last downsampling rate times two.
    def __lowercase ( self , lowercase , lowercase , lowercase) -> Dict:
        '''Shape-check ``SegformerModel``'s last hidden state.'''
        a__ : Dict = SegformerModel(config=lowercase)
        model.to(lowercase)
        model.eval()
        a__ : Optional[Any] = model(lowercase)
        a__ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    # Intended: create_and_check_for_image_segmentation — logits at 1/4
    # resolution, and a positive loss when labels are supplied.
    def __lowercase ( self , lowercase , lowercase , lowercase) -> str:
        '''Shape- and loss-check ``SegformerForSemanticSegmentation``.'''
        a__ : Optional[Any] = self.num_labels
        a__ : List[str] = SegformerForSemanticSegmentation(lowercase)
        model.to(lowercase)
        model.eval()
        a__ : List[str] = model(lowercase)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        a__ : int = model(lowercase , labels=lowercase)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss , 0.0)
    # Intended: create_and_check_for_binary_image_segmentation — single-label
    # (binary) segmentation still produces a positive loss.
    def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]:
        '''Loss-check the binary-segmentation special case.'''
        a__ : Union[str, Any] = 1
        a__ : Optional[int] = SegformerForSemanticSegmentation(config=lowercase)
        model.to(lowercase)
        model.eval()
        a__ : Union[str, Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(lowercase)
        a__ : Optional[Any] = model(lowercase , labels=lowercase)
        self.parent.assertGreater(result.loss , 0.0)
    # Intended: prepare_config_and_inputs_for_common.
    def __lowercase ( self) -> int:
        '''Return ``(config, {"pixel_values": ...})`` for the common tests.'''
        a__ : Any = self.prepare_config_and_inputs()
        a__ , a__ , a__ : str = config_and_inputs
        a__ : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common + pipeline test suite for the SegFormer model family.

    NOTE(review): machine-obfuscated. The base classes ``__UpperCAmelCase``
    are never defined, all class attributes share the name ``__A`` (only the
    last assignment survives), every method is named ``__lowercase``, and
    bodies read back never-defined names (``lowercase``, ``model``,
    ``outputs`` ...) — the suite cannot run as written. Comments describe the
    evident intent of each member.
    """
    # Intended: all_model_classes.
    __A : Any = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Intended: pipeline_model_mapping — pipeline task name -> model class.
    __A : List[str] = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Intended: four boolean feature flags (their original names collapse to one).
    __A : List[str] = True
    __A : Any = False
    __A : Any = False
    __A : str = False
    # setUp: build the shared model tester and the SegFormer config tester.
    def __lowercase ( self) -> Tuple:
        '''Construct the model/config testers used by every test below.'''
        a__ : Union[str, Any] = SegformerModelTester(self)
        a__ : Optional[Any] = SegformerConfigTester(self , config_class=lowercase)
    # Run the generic configuration checks.
    def __lowercase ( self) -> List[Any]:
        '''Delegate to the config tester.'''
        self.config_tester.run_common_tests()
    # Shape-check the base model.
    def __lowercase ( self) -> Tuple:
        '''Exercise ``create_and_check_model``.'''
        a__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase)
    # Loss-check binary segmentation.
    def __lowercase ( self) -> Dict:
        '''Exercise ``create_and_check_for_binary_image_segmentation``.'''
        a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase)
    # Shape/loss-check multi-class segmentation.
    def __lowercase ( self) -> Dict:
        '''Exercise ``create_and_check_for_image_segmentation``.'''
        a__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*lowercase)
    @unittest.skip('SegFormer does not use inputs_embeds')
    def __lowercase ( self) -> Tuple:
        '''Common inputs_embeds test is not applicable to SegFormer.'''
        pass
    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def __lowercase ( self) -> str:
        '''Common embedding-accessor test is not applicable to SegFormer.'''
        pass
    # Check that forward() takes pixel_values as its first argument.
    def __lowercase ( self) -> Union[str, Any]:
        '''Verify the forward signature starts with ``pixel_values``.'''
        a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : List[str] = model_class(lowercase)
            a__ : Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ : Optional[int] = [*signature.parameters.keys()]
            a__ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase)
    # Verify attention outputs: count equals the total stage depth, and the
    # first/last attention maps have the expected (reduced) sequence sizes.
    def __lowercase ( self) -> str:
        '''Check attention tensors returned via kwargs and via the config.'''
        a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Tuple = True
        for model_class in self.all_model_classes:
            a__ : str = True
            a__ : List[str] = False
            a__ : int = True
            a__ : List[Any] = model_class(lowercase)
            model.to(lowercase)
            model.eval()
            with torch.no_grad():
                a__ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase))
            a__ : Optional[Any] = outputs.attentions
            # One attention map per transformer layer across all stages.
            a__ : Dict = sum(self.model_tester.depths)
            self.assertEqual(len(lowercase) , lowercase)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            a__ : Dict = True
            a__ : int = model_class(lowercase)
            model.to(lowercase)
            model.eval()
            with torch.no_grad():
                a__ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase))
            a__ : Optional[Any] = outputs.attentions
            self.assertEqual(len(lowercase) , lowercase)
            # verify the first attentions (first block, first layer)
            a__ : Tuple = (self.model_tester.image_size // 4) ** 2
            a__ : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            a__ : str = (self.model_tester.image_size // 32) ** 2
            a__ : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            a__ : Dict = len(lowercase)
            # Check attention is always last and order is fine
            a__ : List[Any] = True
            a__ : Any = True
            a__ : Dict = model_class(lowercase)
            model.to(lowercase)
            model.eval()
            with torch.no_grad():
                a__ : int = model(**self._prepare_for_class(lowercase , lowercase))
            self.assertEqual(out_len + 1 , len(lowercase))
            a__ : int = outputs.attentions
            self.assertEqual(len(lowercase) , lowercase)
            # verify the first attentions (first block, first layer)
            a__ : List[Any] = (self.model_tester.image_size // 4) ** 2
            a__ : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    # Verify hidden-state outputs: one per encoder block, with the first at
    # 1/4 spatial resolution.
    def __lowercase ( self) -> List[Any]:
        '''Check hidden-state tensors returned via kwargs and via the config.'''
        def check_hidden_states_output(lowercase , lowercase , lowercase):
            a__ : Optional[Any] = model_class(lowercase)
            model.to(lowercase)
            model.eval()
            with torch.no_grad():
                a__ : int = model(**self._prepare_for_class(lowercase , lowercase))
            a__ : Union[str, Any] = outputs.hidden_states
            a__ : Any = self.model_tester.num_encoder_blocks
            self.assertEqual(len(lowercase) , lowercase)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : List[str] = True
            check_hidden_states_output(lowercase , lowercase , lowercase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a__ : int = True
            check_hidden_states_output(lowercase , lowercase , lowercase)
    # Training smoke test: forward + backward with labels must not crash.
    def __lowercase ( self) -> Any:
        '''Check that each trainable head produces a backprop-able loss.'''
        if not self.model_tester.is_training:
            return
        a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Tuple = True
        for model_class in self.all_model_classes:
            # Skip base-mapping models which take no labels.
            if model_class in get_values(lowercase):
                continue
            a__ : Dict = model_class(lowercase)
            model.to(lowercase)
            model.train()
            a__ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase)
            a__ : Optional[int] = model(**lowercase).loss
            loss.backward()
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def __lowercase ( self) -> Union[str, Any]:
        '''Temporarily disabled common test.'''
        pass
    # Smoke-test loading the first published checkpoint.
    @slow
    def __lowercase ( self) -> Tuple:
        '''Load one pretrained checkpoint and check it is not None.'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Optional[Any] = SegformerModel.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
def prepare_img() -> "Image.Image":
    """Load the standard COCO fixture image used by the slow integration tests.

    Restored from the obfuscated ``A_``: the integration tests below call
    ``prepare_img()``, and the original body assigned the opened image to a
    throwaway name and then returned the undefined name ``image`` (also note
    the original ``-> int`` annotation was wrong — a PIL image is returned).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration tests for pretrained SegFormer checkpoints.

    NOTE(review): machine-obfuscated. Methods all share the name
    ``__lowercase`` (only the last survives and none are ``test_``-prefixed,
    so unittest discovers nothing), and bodies read back never-defined names
    (``lowercase``, ``model``, ``outputs`` ...) — the tests cannot run as
    written. Reference tensors below are preserved byte-for-byte.
    """
    # ADE20k b0 checkpoint: compare a logits slice against stored values.
    @slow
    def __lowercase ( self) -> Any:
        '''Run segformer-b0 (ADE) on the COCO fixture image.'''
        a__ : Dict = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
        a__ : int = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            lowercase)
        a__ : Optional[int] = prepare_img()
        a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt')
        a__ : List[str] = encoded_inputs.pixel_values.to(lowercase)
        with torch.no_grad():
            a__ : Optional[int] = model(lowercase)
        # Logits come out at 1/4 of the 512x512 input resolution.
        a__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape , lowercase)
        a__ : Dict = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ]).to(lowercase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-4))
    # Cityscapes b1 checkpoint: same check with a looser tolerance.
    @slow
    def __lowercase ( self) -> Union[str, Any]:
        '''Run segformer-b1 (Cityscapes) on the COCO fixture image.'''
        a__ : Dict = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
        a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(lowercase)
        a__ : Dict = prepare_img()
        a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt')
        a__ : List[str] = encoded_inputs.pixel_values.to(lowercase)
        with torch.no_grad():
            a__ : Optional[Any] = model(lowercase)
        a__ : List[Any] = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape , lowercase)
        a__ : Optional[Any] = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ]).to(lowercase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-1))
    # Post-processing: semantic maps with and without target_sizes resizing.
    @slow
    def __lowercase ( self) -> Dict:
        '''Check ``post_process_semantic_segmentation`` output shapes.'''
        a__ : List[str] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
        a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            lowercase)
        a__ : Any = prepare_img()
        a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt')
        a__ : Optional[int] = encoded_inputs.pixel_values.to(lowercase)
        with torch.no_grad():
            a__ : Union[str, Any] = model(lowercase)
        a__ : int = outputs.logits.detach().cpu()
        # With target_sizes the map is resized to the requested size ...
        a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(500, 300)])
        a__ : Optional[Any] = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape , lowercase)
        # ... without it, the map stays at the logits' native resolution.
        a__ : Any = image_processor.post_process_semantic_segmentation(outputs=lowercase)
        a__ : Union[str, Any] = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape , lowercase)
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class A :
    """Builds tiny RoFormer configs/inputs and shape-checks each TF head.

    NOTE(review): machine-obfuscated. The ``__init__`` signature repeats the
    parameter name ``__UpperCAmelCase`` (a duplicate-argument
    ``SyntaxError``), bodies assign to the throwaway name ``UpperCAmelCase__``
    while reading back never-defined names (``parent``, ``input_ids``,
    ``model`` ...), and all check methods share the name ``lowercase_`` — the
    class cannot run as written. Comments describe the evident intent; note
    every hyper-parameter is hard-coded, so the constructor arguments are
    ignored anyway.
    """
    def __init__(self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]=1_3 , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Dict=9_9 , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Tuple=3_7 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=5_1_2 , __UpperCAmelCase : Union[str, Any]=1_6 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Any=None , ) -> Dict:
        """Store the (hard-coded) tiny-model hyper-parameters."""
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = 1_3
        UpperCAmelCase__ = 7
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = 9_9
        UpperCAmelCase__ = 3_2
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 4
        UpperCAmelCase__ = 3_7
        UpperCAmelCase__ = "gelu"
        UpperCAmelCase__ = 0.1
        UpperCAmelCase__ = 0.1
        UpperCAmelCase__ = 5_1_2
        UpperCAmelCase__ = 1_6
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 0.02
        UpperCAmelCase__ = 3
        UpperCAmelCase__ = 4
        UpperCAmelCase__ = None
    # Intended: prepare_config_and_inputs — random ids/masks/labels plus a
    # RoFormerConfig; returns the 7-tuple unpacked by the last method below.
    def lowercase_ (self : Union[str, Any] ) -> str:
        """Build random inputs and the tiny ``RoFormerConfig``."""
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase__ = None
        if self.use_input_mask:
            UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase__ = None
        if self.use_token_type_ids:
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase__ = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Intended: create_and_check_model — base model, dict and list inputs.
    def lowercase_ (self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any ) -> List[str]:
        """Shape-check ``TFRoFormerModel``'s last hidden state."""
        UpperCAmelCase__ = TFRoFormerModel(config=__UpperCAmelCase )
        UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        UpperCAmelCase__ = [input_ids, input_mask]
        UpperCAmelCase__ = model(__UpperCAmelCase )
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Intended: create_and_check_lm_head — causal-LM logits shape.
    def lowercase_ (self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] ) -> Any:
        """Shape-check ``TFRoFormerForCausalLM`` prediction scores."""
        UpperCAmelCase__ = True
        UpperCAmelCase__ = TFRoFormerForCausalLM(config=__UpperCAmelCase )
        UpperCAmelCase__ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    # Intended: create_and_check_for_masked_lm.
    def lowercase_ (self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int ) -> Union[str, Any]:
        """Shape-check ``TFRoFormerForMaskedLM`` logits."""
        UpperCAmelCase__ = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
        UpperCAmelCase__ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Intended: create_and_check_for_sequence_classification.
    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str ) -> Optional[Any]:
        """Shape-check ``TFRoFormerForSequenceClassification`` logits."""
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
        UpperCAmelCase__ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Intended: create_and_check_for_multiple_choice — inputs tiled to
    # (batch, num_choices, seq).
    def lowercase_ (self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> Any:
        """Shape-check ``TFRoFormerForMultipleChoice`` logits."""
        UpperCAmelCase__ = self.num_choices
        UpperCAmelCase__ = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Intended: create_and_check_for_token_classification.
    def lowercase_ (self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> Union[str, Any]:
        """Shape-check ``TFRoFormerForTokenClassification`` logits."""
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
        UpperCAmelCase__ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Intended: create_and_check_for_question_answering.
    def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) -> List[str]:
        """Shape-check ``TFRoFormerForQuestionAnswering`` start/end logits."""
        UpperCAmelCase__ = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
        UpperCAmelCase__ = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        UpperCAmelCase__ = model(__UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Intended: prepare_config_and_inputs_for_common — NOTE(review): the
    # unpacking below binds all seven values to the same obfuscated name and
    # then reads never-defined variables; it cannot run as written.
    def lowercase_ (self : Dict ) -> Dict:
        """Return ``(config, inputs_dict)`` for the common tests."""
        UpperCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) = config_and_inputs
        UpperCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Common test suite for the TF RoFormer model family.

    Fixes: the ``is_pipeline_test_to_skip`` signature repeated one parameter
    name five times (SyntaxError) and referenced ``pipeline_test_casse_name``
    which was never bound; every class attribute and method shared a single
    mangled name so earlier definitions were dead — restored the attribute
    names the ModelTesterMixin/pipeline mixins read and conventional
    ``test_*`` names so unittest discovers the tests.
    """

    # Model heads exercised by the shared ModelTesterMixin checks.
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping used by the pipeline tests.
    pipeline_model_mapping = (
        {
            'feature-extraction': TFRoFormerModel,
            'fill-mask': TFRoFormerForMaskedLM,
            'question-answering': TFRoFormerForQuestionAnswering,
            'text-classification': TFRoFormerForSequenceClassification,
            'text-generation': TFRoFormerForCausalLM,
            'token-classification': TFRoFormerForTokenClassification,
            'zero-shot': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip only the text-generation pipeline tests for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self ):
        self.model_tester = TFRoFormerModelTester(self )
        # NOTE(review): the mangled source passed an undefined placeholder as
        # config_class; RoFormerConfig is the conventional value — confirm it
        # is imported at the top of this file.
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=3_7 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_causal_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class A ( unittest.TestCase ):
    """Integration test: pretrained Chinese RoFormer MLM logits vs reference slice.

    Fix: the mangled body assigned every result to one placeholder name and
    then referenced ``model``/``output`` which were never bound (NameError).
    """

    @slow
    def test_inference_masked_lm(self ):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_0_0_0_0
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class A ( unittest.TestCase ):
    """Tests for TFRoFormerSinusoidalPositionalEmbedding.

    Fix: the mangled bodies referenced ``emba``/``input_ids`` which were
    never bound, and the tolerance attribute had a placeholder name while
    the methods read ``self.tolerance``.
    """

    # Absolute tolerance used by both comparisons below.
    tolerance = 1E-4

    def test_basic(self ):
        input_ids = tf.constant([[4, 1_0]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_output = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emba_output , desired_weights , atol=self.tolerance )

    def test_positional_emb_weights_against_roformer(self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class A ( unittest.TestCase ):
    """Test for TFRoFormerSelfAttention.apply_rotary_position_embeddings.

    Fixes: ``tf.floataa`` is not a TF dtype (mangled ``tf.float32``), and the
    body referenced ``embed_positions``/``query_layer``/``key_layer`` which
    were never bound.
    """

    tolerance = 1E-4

    def test_apply_rotary_position_embeddings(self ):
        # 2 batches x 12 heads x 16 positions x 64 dims, scaled to small values.
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.float32 ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
| 365 | # Lint as: python3
import itertools
import os
import re
UpperCamelCase__ = re.compile(R'([A-Z]+)([A-Z][a-z])')
UpperCamelCase__ = re.compile(R'([a-z\d])([A-Z])')
UpperCamelCase__ = re.compile(R'(?<!_)_(?!_)')
UpperCamelCase__ = re.compile(R'(_{2,})')
UpperCamelCase__ = R'^\w+(\.\w+)*$'
UpperCamelCase__ = R'<>:/\|?*'
def lowerCAmelCase_ ( __A ) -> str:
    """Convert a CamelCase name to snake_case.

    Fix: the original applied both substitutions to the raw input (the second
    pass discarded the first) and returned an undefined ``name``.
    """
    # First split acronym/word boundaries, then lowercase/capital boundaries;
    # the second sub must run on the output of the first.
    name = _uppercase_uppercase_re.sub(r"\1_\2", __A )
    name = _lowercase_uppercase_re.sub(r"\1_\2", name )
    return name.lower()
def lowerCAmelCase_ ( __A ) -> str:
    """Convert a snake_case name to CamelCase.

    Fix: the original flattened the raw input string (iterating characters)
    instead of the split parts.
    """
    parts = _single_underscore_re.split(__A )
    # Split each piece again on runs of multiple underscores, then flatten.
    parts = [_multiple_underscores_re.split(n ) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts ) if n != "" )
def lowerCAmelCase_ ( __A ) -> str:
    """Return the snake_case filename prefix for a bare dataset name.

    Fix: the original compared against (and formatted) an undefined ``name``.

    Raises:
        ValueError: if the argument looks like a path rather than a bare name.
    """
    if os.path.basename(__A ) != __A:
        raise ValueError(f"""Should be a dataset name, not a path: {__A}""" )
    # NOTE(review): `camelcase_to_snakecase` is not defined under that name in
    # this file (the helper above was renamed) — confirm the intended target.
    return camelcase_to_snakecase(__A )
def lowerCAmelCase_ ( name, split ) -> str:
    """Return ``"<name-prefix>-<split>"`` for a dataset name and split.

    Fix: both parameters were named ``__A`` (a SyntaxError — duplicate
    argument) and the body referenced the undefined ``name``/``split``.

    Raises:
        ValueError: if *name* is a path, or *split* does not match ``_split_re``.
    """
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re, split ):
        raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
    # NOTE(review): `filename_prefix_for_name` is not defined under that name
    # in this file — confirm the intended target.
    return f"""{filename_prefix_for_name(name )}-{split}"""
def lowerCAmelCase_ ( name, split, data_dir, filetype_suffix=None ) -> str:
    """Return the glob pattern matching every shard file of a split.

    Fix: all positional parameters shared the name ``__A`` (SyntaxError);
    restored distinct names matching how each value is used below.
    """
    prefix = filename_prefix_for_split(name, split )
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix )
    # Trailing "*" matches the per-shard suffix produced when writing.
    return f"""{filepath}*"""
def lowerCAmelCase_ ( path, name, split, filetype_suffix=None, shard_lengths=None ) -> List[str]:
    """Return the concrete filenames for a split, one per shard.

    Fix: all positional parameters shared the name ``__A`` (SyntaxError);
    restored distinct names matching how each value is used below.
    """
    prefix = filename_prefix_for_split(name, split )
    prefix = os.path.join(path, prefix )
    if shard_lengths:
        # Multi-shard layout: "<prefix>-00000-of-00003" style names.
        num_shards = len(shard_lengths )
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        # Single-file layout.
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
| 143 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowercase ( ) -> "Image.Image":
    """Download and return the demo image used for the conversion sanity check.

    Fix: the original passed the undefined placeholder ``UpperCamelCase__``
    as both the URL and the ``stream`` flag (NameError).
    """
    url = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
    image = Image.open(requests.get(url ,stream=True ).raw ).convert("RGB" )
    return image
def _lowercase ( __snake_case ) -> int:
    # Build the (source, destination) weight-name pairs used to rename the
    # LAVIS checkpoint's vision-encoder and Q-Former weights to the
    # HuggingFace InstructBLIP layout. The argument is the HF config; only
    # `config.vision_config.num_hidden_layers` is read.
    # NOTE(review): the parameter is bound as `__snake_case` but the loop
    # below reads `config` — presumably the parameter was renamed by the
    # code mangler; confirm before running.
    __lowerCAmelCase : Optional[int] = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
    # Per-layer mappings for every transformer block of the vision encoder.
    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
    # fmt: on
    return rename_keys
def _lowercase ( dct ,old ,new ) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fix: the original declared three parameters all named ``__snake_case``
    (a SyntaxError), popped with an undefined key, and discarded the value
    instead of storing it under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def _lowercase ( state_dict ,config ) -> None:
    """Merge the separate q/v attention biases into a single qkv bias, in place.

    For each vision-encoder layer, pops ``q_bias`` and ``v_bias`` from
    *state_dict* and writes ``qkv.bias`` as [q_bias, zeros, v_bias] (the
    checkpoint has no k bias).

    Fix: the original declared duplicate ``__snake_case`` parameters
    (SyntaxError), passed undefined placeholders to ``torch.zeros_like``,
    and never wrote the concatenated bias back into *state_dict*.
    """
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias ,requires_grad=False ), v_bias) )
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def _lowercase ( model_name ) -> "tuple":
    """Build the InstructBlipConfig for *model_name*; return (config, image_size).

    Fix: the body mixed references to ``model_name`` and the undefined
    placeholder ``UpperCamelCase__`` while the parameter had a third name
    entirely (NameError); unified on ``model_name``.

    Raises:
        ValueError: for an unrecognized *model_name*.
    """
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" ,vocab_size=32_001 ).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" ,vocab_size=32_001 ).to_dict()
    else:
        raise ValueError("Model name not supported" )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
    config = InstructBlipConfig(vision_config=vision_config ,text_config=text_config ,qformer_config=qformer_config )
    return config, image_size
@torch.no_grad()
def _lowercase ( model_name ,pytorch_dump_folder_path=None ,push_to_hub=False ) -> None:
    """Convert a LAVIS InstructBLIP checkpoint to the HuggingFace format.

    Loads the original LAVIS model, renames its weights into the HF layout,
    verifies logits and generation against the original, then optionally
    saves and/or pushes the converted model.

    Fix: the original signature declared three parameters all named
    ``__snake_case`` (SyntaxError) and the body assigned every result to a
    single placeholder while reading descriptive names that were never
    bound; restored the names implied by those reads.

    Args:
        model_name: one of the keys of ``model_name_to_original`` below.
        pytorch_dump_folder_path: where to save the converted model, or None.
        push_to_hub: whether to push model + processor to the Hub.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" ,truncation_side="left" )
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl" ,truncation_side="left" )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" ,truncation_side="left" ,bos_token="</s>" ,unk_token="</s>" )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
    config, image_size = get_blipa_config(model_name )
    hf_model = InstructBlipForConditionalGeneration(config ).eval()
    model_name_to_original = {
        '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
        '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
        '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
        '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
    }
    lavis_model_name, lavis_model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    # Keep original and converted models on separate devices when possible.
    lavis_device = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
    hf_model_device = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model_name ,model_type=lavis_model_type ,is_eval=True ,device=lavis_device )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("Qformer.bert" ):
            key = key.replace("Qformer.bert" ,"qformer" )
        if "attention.self" in key:
            key = key.replace("self" ,"attention" )
        if "llm_proj" in key:
            key = key.replace("llm_proj" ,"language_projection" )
        if "t5_proj" in key:
            key = key.replace("t5_proj" ,"language_projection" )
        if key.startswith("llm_model" ):
            key = key.replace("llm_model" ,"language_model" )
        if key.startswith("t5" ):
            key = key.replace("t5" ,"language" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict ,config )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict ,strict=True )
    image = load_demo_image()
    prompt = '''What is unusual about this image?'''
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size} ,image_mean=OPENAI_CLIP_MEAN ,image_std=OPENAI_CLIP_STD )
    processor = InstructBlipProcessor(
        image_processor=image_processor ,tokenizer=tokenizer ,qformer_tokenizer=qformer_tokenizer ,)
    inputs = processor(images=image ,text=prompt ,return_tensors="pt" ).to(hf_model_device )
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(lavis_device )
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,pixel_values )
    original_model.to(lavis_device )
    hf_model.to(hf_model_device )
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
            logits = hf_model(**inputs ).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
            label_input_ids = tokenizer("\n" ,return_tensors="pt" ).input_ids.to(hf_model_device )
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
            logits = hf_model(**inputs ,labels=labels ).logits
    print("First values of original logits:" ,original_logits[0, :3, :3] )
    print("First values of HF logits:" ,logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if '''vicuna''' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device ) ,logits ,atol=atol )
    print("Looks ok!" )
    print("Generating with original model..." )
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt} ,num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model..." )
    outputs = hf_model.generate(
        **inputs ,do_sample=False ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:" ,original_outputs )
    output_text = processor.batch_decode(outputs ,skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("HF generation:" ,output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F"""Salesforce/{model_name}""" )
        hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Checkpoints this script knows how to convert.
    # Fix: parser and choices were both assigned to one clobbered placeholder
    # while the code below read the undefined `parser`/`choices`/`args`, and
    # the last line carried extraction garbage ("| 269 |").
    choices = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='instructblip-flan-t5-xl',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()
    # NOTE(review): the conversion entry point is defined as `_lowercase`
    # above; `convert_blipa_checkpoint` is the intended name — confirm.
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def _a( test_file ):
    """Turn a model-test file path into its dotted module path.

    Fix: the original's parameter was a placeholder while the body read the
    undefined ``test_file`` (NameError).

    Raises:
        ValueError: if the path is not of the form ``tests/models/.../test_modeling_*.py``.
    """
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            f"{test_file} instead." )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
    # Drop the ".py" suffix and join the components with dots.
    components = components[:-1] + [test_fn.replace('''.py''', '''''' )]
    return '''.'''.join(components )
def _a( UpperCamelCase__ : Dict ):
    """Import and return the test module corresponding to a test file path."""
    module_path = get_module_path(UpperCamelCase__ )
    return importlib.import_module(module_path )
def _a( UpperCamelCase__ : Tuple ):
    """Collect all ``*ModelTester`` classes defined in a test module, sorted by name.

    Fixes: ``getattr`` was called with the module as both arguments instead
    of ``(module, attr)``, and the sort key lambda named its parameter
    differently from the name it read (NameError).
    """
    test_module = get_test_module(UpperCamelCase__ )
    tester_classes = [getattr(test_module, attr ) for attr in dir(test_module ) if attr.endswith('''ModelTester''' )]
    # sort with class names
    return sorted(tester_classes, key=lambda x : x.__name__ )
def _a( UpperCamelCase__ : int ):
    """Collect the test classes of a module (those with non-empty ``all_model_classes``).

    Fixes: every ``getattr``/``append`` received the function argument
    instead of the loop values, and the sort key lambda read an unbound name.
    """
    test_classes = []
    test_module = get_test_module(UpperCamelCase__ )
    for attr in dir(test_module ):
        attr_value = getattr(test_module, attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, '''all_model_classes''', [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes, key=lambda x : x.__name__ )
def _a( UpperCamelCase__ : int ):
    """Union of all model classes covered by a module's test classes, sorted by name.

    Fixes: ``sorted`` was applied to the function argument instead of the
    collected set, and the sort key lambda read an unbound name.
    """
    test_classes = get_test_classes(UpperCamelCase__ )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes, key=lambda x : x.__name__ )
def _a( test_class ):
    """Instantiate *test_class* and return the class of its ``model_tester`` (or None).

    Fix: the parameter was a placeholder while the body read the undefined
    ``test_class``/``test`` names (NameError).
    """
    test = test_class()
    if hasattr(test, '''setUp''' ):
        test.setUp()
    model_tester = None
    if hasattr(test, '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def _a( test_file, model_class ):
    """Test classes in *test_file* that cover *model_class*, sorted by name.

    Fixes: both parameters shared one name (SyntaxError) and the sort key
    lambda read an unbound name.
    """
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes, key=lambda x : x.__name__ )
def _a( test_file, model_class ):
    """Tester classes used by the test classes that cover *model_class*, sorted by name.

    Fixes: both parameters shared one name (SyntaxError); the inner call
    received the file argument instead of the loop's test class; the sort
    key lambda read an unbound name.
    """
    test_classes = get_test_classes_for_model(test_file, model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes, key=lambda x : x.__name__ )
def _a( UpperCamelCase__ : str ):
    """Mapping: test class -> its tester class (or None).

    Fix: the dict comprehension passed the function argument to
    ``get_model_tester_from_test_class`` instead of the loop variable.
    """
    test_classes = get_test_classes(UpperCamelCase__ )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def _a( UpperCamelCase__ : str ):
    """Mapping: model class -> the test classes covering it.

    Fix: the comprehension passed the file argument twice instead of
    ``(test_file, model_class)``.
    """
    model_classes = get_model_classes(UpperCamelCase__ )
    model_test_mapping = {
        model_class: get_test_classes_for_model(UpperCamelCase__, model_class ) for model_class in model_classes
    }
    return model_test_mapping
def _a( UpperCamelCase__ : str ):
    """Mapping: model class -> the tester classes exercising it.

    Fix: the comprehension passed the file argument twice instead of
    ``(test_file, model_class)``.
    """
    model_classes = get_model_classes(UpperCamelCase__ )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(UpperCamelCase__, model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def _a( o ):
    """Best-effort JSON-friendly copy: classes become their names, containers recurse.

    Fixes: every ``isinstance`` compared the value against itself (never a
    type), the recursion called the undefined name ``to_json``, and the
    last line carried extraction garbage ("| 269 |").
    """
    if isinstance(o, str ):
        return o
    elif isinstance(o, type ):
        return o.__name__
    elif isinstance(o, (list, tuple) ):
        return [_a(x ) for x in o]
    elif isinstance(o, dict ):
        return {_a(k ): _a(v ) for k, v in o.items()}
    else:
        return o
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# NOTE(review): both assignments below bind the SAME mangled name, so the
# archive map clobbers the logger — presumably these were `logger` and
# `ESM_PRETRAINED_CONFIG_ARCHIVE_MAP` originally; confirm before use.
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
# TODO Update this
# Checkpoint name -> hosted config.json URL.
SCREAMING_SNAKE_CASE__ : Any = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCAmelCase__ ( __lowercase ):
    """Configuration class for ESM models (optionally carrying an ESMFold head config).

    Fixes: the ``__init__`` signature declared every parameter with the same
    placeholder name (a SyntaxError) — restored names from the attribute
    assignments in the body; the dict-conversion checks used
    ``isinstance(x, x)`` instead of ``isinstance(x, dict)``; the serializer
    override had a mangled name while the serialization convention (it calls
    ``super().to_dict()``) requires ``to_dict``.
    """

    # Model-type tag used by the auto classes (restored from the "esm" literal).
    model_type = """esm"""

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_26,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            # Non-folding models carry no fold head or vocab list.
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def to_dict(self ):
        """Serialize to a dict, flattening the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class lowerCAmelCase__ :
    """ESMFold head configuration.

    Fixes: every dataclass field shared the single mangled name ``a__`` so
    the class collapsed to one attribute — restored the field names (the
    ``use_esm_attn_map`` field is read by the config check above; the
    remaining names follow the upstream ESMFold config and should be
    confirmed); the trunk-dict check used ``isinstance`` against an
    undefined name instead of ``dict``; the post-init/serializer hooks had
    mangled names while dataclass semantics and the ``.to_dict()`` call in
    the main config require ``__post_init__``/``to_dict``.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lm_head_hidden_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self ):
        # Fill in / convert the nested trunk config right after dataclass init.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self ):
        """Serialize, recursing into the trunk config."""
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class lowerCAmelCase__ :
    """ESMFold trunk configuration (class continues below with its validation).

    Fix: every field shared the single mangled name ``a__`` so the dataclass
    collapsed to one attribute; restored the names the validation method
    below reads (``sequence_state_dim``, ``pairwise_state_dim``,
    ``sequence_head_width``, ``pairwise_head_width``, ``dropout``,
    ``max_recycles``, ``structure_module``). The remaining names follow the
    upstream ESMFold trunk config — TODO confirm against upstream.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
def __A ( self : Tuple ) -> List[str]:
if self.structure_module is None:
__lowerCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
__lowerCamelCase = self.sequence_state_dim // self.sequence_head_width
__lowerCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def __A ( self : Dict ) -> Optional[Any]:
__lowerCamelCase = asdict(self )
__lowerCamelCase = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (invariant point
    attention head).

    NOTE(review): the class name is grounded by the ``StructureModuleConfig``
    calls above; the original fields were all obfuscated to ``a__``.  Field
    names follow the upstream ESMFold structure-module config (same types and
    defaults, in order) — confirm against it.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Return the configuration as a plain dict."""
        return asdict(self)
def get_default_vocab_list() -> tuple:
    """Return the default ESM-2 token vocabulary (specials, amino-acid
    letters, gap/insertion characters, and the mask token).

    NOTE(review): renamed from the obfuscated ``__magic_name__`` to match the
    ``get_default_vocab_list()`` call site above, which otherwise raised
    NameError.
    """
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T",
        "I", "D", "P", "K", "Q", "N", "F", "Y",
        "M", "H", "W", "C", "X", "B", "U", "Z", "O",
        ".", "-",
        "<null_1>", "<mask>",
    )
| 339 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)
    using the (deliberately inefficient) slowsort algorithm.

    NOTE(review): the original def repeated one parameter name three times
    (a SyntaxError), recursed via the name `slowsort` while being defined
    under another name, and its "swap" assigned both values to throwaway
    locals so nothing was ever swapped.  All three fixed.
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    # Place the maximum of the two halves at `end`, then sort the rest.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 339 | 1 |
'''Print a classic one-line Python quine: the lambda formats the template
string into itself via %r, so the printed text is the print statement.'''
# %% collapses to a literal % after formatting; %r re-embeds the template
# (with quotes) into itself.  Do not reformat this line — any change to the
# statement must be mirrored inside the string to keep the output faithful.
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 83 |
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Convert a decimal number (int, float, or numeric string) to a
    lowest-terms fraction ``(numerator, denominator)``.

    Raises:
        ValueError: if the input cannot be parsed as a number.

    NOTE(review): renamed from the obfuscated ``_lowerCAmelCase`` to match the
    ``decimal_to_fraction`` calls in the __main__ block; the original body also
    referenced the undefined name ``decimal`` and collapsed every local into
    one placeholder, so it could not run.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: gcd of numerator and denominator ends in `divisor`.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
| 159 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): the four constants below were all assigned to the single
# obfuscated name `_lowerCAmelCase`, so each assignment clobbered the previous
# one and the tokenizer class's references (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_INIT_CONFIGURATION,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) raised NameError.  Restored the
# names the class reads.  The per-checkpoint URL/size/config tables are
# generated from a single checkpoint list; the resulting dicts are
# value-identical to the original hand-written literals.

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoints covered by the tables below, in the original order.
_BERT_CHECKPOINTS = (
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    "bert-base-chinese",
    "bert-base-german-cased",
    "bert-large-uncased-whole-word-masking",
    "bert-large-cased-whole-word-masking",
    "bert-large-uncased-whole-word-masking-finetuned-squad",
    "bert-large-cased-whole-word-masking-finetuned-squad",
    "bert-base-cased-finetuned-mrpc",
    "bert-base-german-dbmdz-cased",
    "bert-base-german-dbmdz-uncased",
    "TurkuNLP/bert-base-finnish-cased-v1",
    "TurkuNLP/bert-base-finnish-uncased-v1",
    "wietsedv/bert-base-dutch-cased",
)

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        name: f"https://huggingface.co/{name}/resolve/main/vocab.txt" for name in _BERT_CHECKPOINTS
    },
    "tokenizer_file": {
        name: f"https://huggingface.co/{name}/resolve/main/tokenizer.json" for name in _BERT_CHECKPOINTS
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {name: 512 for name in _BERT_CHECKPOINTS}

# "uncased" appears in a checkpoint name exactly when that checkpoint
# lowercases its input (verified against the original literal table).
PRETRAINED_INIT_CONFIGURATION = {
    name: {"do_lower_case": "uncased" in name} for name in _BERT_CHECKPOINTS
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast (tokenizers-backed) BERT tokenizer.

    NOTE(review): the original class had five class attributes sharing one
    obfuscated name, three methods sharing another, and several ``def``s with
    duplicate parameter names (a SyntaxError), with an undefined base-class
    placeholder.  Names restored to the standard PreTrainedTokenizerFast
    interface implied by the bodies, the ``super().__init__`` keyword
    arguments, and the ``tokenization_bert.BertTokenizer`` import — confirm
    against the slow-tokenizer counterpart.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer when the saved state disagrees with
        # the requested lowercase / strip-accents / CJK handling options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ):
        """0s over [CLS] A [SEP]; 1s over B [SEP] when B is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the backend tokenizer model files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 368 |
'''Levenshtein (edit) distance via memoized index-pair recursion.'''

import functools


def UpperCamelCase(worda: str, wordb: str) -> int:
    """Return the minimum number of insert/delete/substitute operations
    turning `worda` into `wordb`.

    NOTE(review): the original defs repeated their parameter names (a
    SyntaxError) and, after the identifier collapse, compared
    ``worda[indexa] != worda[indexa]`` — a character against itself — so
    substitutions were always free.  Restored the two-word / two-index form
    the inline comments describe.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98 | 0 |
'''Offline-simulation tests: each block checks that network calls fail with
the expected exception under a different simulated offline mode.

NOTE(review): the three tests were all named `_lowercase` (later defs
shadowed earlier ones and pytest would not collect any of them) and two
expected exceptions were the undefined placeholder `__A`.  Restored distinct
`test_*` names; the first expected exception is grounded by the
RequestWouldHangIndefinitelyError import, the last by the HF_DATASETS_OFFLINE
mode (datasets raises a ConnectionError subclass there) — confirm against the
datasets test utilities.
'''

import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 349 |
'''simple docstring'''
def _lowercase ( __A ,__A ):
'''simple docstring'''
__UpperCamelCase = len(__A )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 ,required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
"""Project Euler 20: sum of the digits of 100!."""

from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of num!.

    NOTE(review): renamed from the obfuscated `snake_case` to match the
    `solution(...)` call in the __main__ block, and fixed the generator to
    sum `int(digit)` — the original summed `int(num)` once per digit, so it
    returned num * number_of_digits instead of the digit sum.
    """
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 368 |
"""Multilingual-CLIP text tower: an XLM-R encoder plus a linear projection."""

import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Config for the M-CLIP text model.

    NOTE(review): the first class name is grounded by the
    ``config_class = MCLIPConfig`` reference below; ``transformerDimensions``
    and ``numDims`` are grounded by the attribute reads in the model's
    ``__init__``.  The original ``__init__`` repeated one parameter name (a
    SyntaxError).  ``model_type`` replaces an obfuscated class attribute
    holding the string "M-CLIP" — confirm against the M-CLIP reference
    implementation.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """Wraps XLMRobertaModel and projects mean-pooled embeddings."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mask-weighted mean pooling over the sequence dimension.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), pooled
| 253 | 0 |
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
_validate_point(__a )
_validate_point(__a )
if len(__a ) != len(__a ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(__a , __a ) ) )
def SCREAMING_SNAKE_CASE__ ( __a ):
if point:
if isinstance(__a , __a ):
for item in point:
if not isinstance(__a , (int, float) ):
snake_case_ : str = (
'Expected a list of numbers as input, found '
f"""{type(__a ).__name__}"""
)
raise TypeError(__a )
else:
snake_case_ : Any = f"""Expected a list of numbers as input, found {type(__a ).__name__}"""
raise TypeError(__a )
else:
raise ValueError('Missing an input' )
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
_validate_point(__a )
_validate_point(__a )
if len(__a ) != len(__a ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(__a , __a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 |
import re
import string
import numpy as np
import datasets
# NOTE(review): the three module strings below were all assigned to the same
# obfuscated name `_SCREAMING_SNAKE_CASE`, while the decorator and metric info
# read _DESCRIPTION, _KWARGS_DESCRIPTION and _CITATION (NameError).  Restored
# the names the class uses, the datasets.Metric hook names `_info`/`_compute`
# (both methods were named `UpperCAmelCase_`, so the second shadowed the
# first), and the per-element regex substitution.

_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate between predictions and references, with optional
    regex stripping, case folding, and punctuation/digit removal."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Compute the exact-match percentage after the requested normalizations."""
        if regexes_to_ignore is not None:
            # FIX(review): the collapsed placeholders made re.sub substitute a
            # pattern into itself; each regex is removed from every element.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 327 | 1 |
'''Project Euler 77: first value that can be written as the sum of primes in
over five thousand different ways.

NOTE(review): restored from obfuscated placeholder names — the module
constant is `NUM_PRIMES` and the sieve set is `primes` (both referenced by
those names below but assigned to placeholders), and the two functions are
`partition`/`solution` (grounded by the recursive call and the __main__
call respectively).
'''

from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of distinct prime-multiset products over all ways to
    write `number_to_partition` as a sum of primes.  Each product uniquely
    identifies one multiset of primes, so the set's size counts the
    prime partitions."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first integer with more than `number_unique_partitions`
    prime partitions, or None if none is found below NUM_PRIMES."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
'''Feature-extraction pipeline: tokenize text, run the model, and return the
first output tensor (last hidden state or logits) as nested lists.'''

from typing import Dict

from .base import GenericTensor, Pipeline


class _lowerCAmelCase(Pipeline):
    """Pipeline returning raw model features for input text.

    NOTE(review): all four hook methods were named `_a`, so only the last
    survived on the class; restored the hook names the `Pipeline` base class
    dispatches to (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`).  The undefined base-class placeholder `__UpperCAmelCase`
    is replaced by the `Pipeline` imported above.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """Load a MobileViT YAML config file and flatten it into an
    argparse.Namespace with dot-separated attribute names.

    NOTE(review): renamed from the obfuscated `__A` to match the
    `load_orig_config_file(...)` call below; the inner helper's `def` also
    repeated its keyword-parameter name (a SyntaxError), and all locals were
    collapsed into placeholders.
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings: {"a": {"b": 1}} -> {"a.b": 1}.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for the given conversion task.

    NOTE(review): renamed from the obfuscated `__A` to match the call in the
    conversion entry point below; all assignments targeted throwaway locals,
    so the label counts, image size, and original-config values were
    discarded.  The config attribute names (num_labels, image_size,
    width_multiplier, hidden_act, output_stride, atrous_rates,
    aspp_out_channels, aspp_dropout_prob, id2label, label2id) follow the
    transformers MobileViTV2 config — confirm against it.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)

    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` within `dct` in place.

    NOTE(review): renamed from the obfuscated `__A` to match the
    `rename_key(...)` call in the conversion entry point; the original def
    repeated one parameter name three times (a SyntaxError) and dropped the
    re-insertion of the popped value.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTV2 checkpoint keys to transformers-style keys.

    Returns a list of (old_key, new_key) pairs, one per key in `state_dict`.
    When `base_model` is True the "mobilevitv2." prefix is omitted.

    NOTE(review): renamed from the obfuscated `__A` to match the
    `create_rename_keys(...)` call in the conversion entry point; the
    intermediate key was collapsed into a placeholder local.
    """
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(""".block.""", """.""")
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""", """.convolution.""")
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""", """.normalization.""")
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""", f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""", """.expand_1x1.""")
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""", """.reduce_1x1.""")
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""", """layernorm_before.""")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""", """attention.""")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""", """layernorm_after.""")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""", """ffn.conv1.""")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""", """ffn.conv2.""")
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""", """classifier.""")
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""", """segmentation_head.""")
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""", """.""")
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""", """.""")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-segmentation-head weights from *state_dict* in place.

    The HF MobileViTV2 port does not model ``seg_head.aux_head``, so those
    original-checkpoint entries must be removed before renaming/loading.
    """
    # Collect first, then pop: never mutate a dict while iterating its keys.
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO val2017 "two cats" test image.

    Used only as a fixed input for sanity-checking the converted model's outputs.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HF format and save it.

    Args:
        task_name: one of the supported task identifiers (e.g. ``imagenet1k_256``,
            ``ade20k_deeplabv3``); selects the config and the model head.
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original (mlcvnets) config file.
        pytorch_dump_folder_path: output directory for the HF model + processor.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model: segmentation tasks get the DeepLabV3 head,
    # everything else the classification head.
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.idalabel[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
              imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
              ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for image captioning: a learned prefix (e.g. a CLIP image
    embedding) is projected into GPT-2's token-embedding space and prepended
    to the text embeddings, conditioning autoregressive generation.
    """

    # GPT-2 recreates its attention bias buffers at load time; don't warn about them.
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        """
        Args:
            prefix_length: number of prefix tokens prepended to the text.
            prefix_inner_dim: dimensionality of the incoming prefix embeddings.
            prefix_hidden_dim: optional bottleneck dimension; required whenever
                ``prefix_inner_dim != n_embd`` so the prefix can be projected.
            Remaining arguments mirror ``GPT2Config``.
        """
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Encode into (and decode out of) the optional bottleneck; identity when unused.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run the decoder on prefix + text.

        Returns the GPT-2 output; when a bottleneck is configured, also returns
        the encoded (hidden) prefix so callers can supervise/reuse it.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Pad labels with zeros over the prefix positions so lengths match.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        """Zero labels for the prefix positions (ignored as real targets)."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project raw prefix embeddings into the bottleneck space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one caption per feature row via beam search.

        Returns (stacked best-beam token tensors, stacked sequence lengths).
        """
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Length-normalized beam search decoding.

        Exactly one of ``input_ids`` / ``input_embeds`` seeds the generation.
        Returns beams sorted by score (best first) and their lengths.
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: expand the single hypothesis into beam_size beams.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams may only "extend" with token 0 at no cost.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 323 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants read by the tokenizer class below; the obfuscated
# version bound them all to the same throwaway name, leaving the class's
# references (logger, SPIECE_UNDERLINE, VOCAB_FILES_NAMES, ...) undefined.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_lowerCAmelCase : Optional[int] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', 
'''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __magic_name__(PreTrainedTokenizer):
    """
    NLLB tokenizer built on SentencePiece (BPE model).

    Mirrors fairseq's vocabulary layout: special tokens occupy ids 0-3, the
    SentencePiece pieces follow at an offset of 1, then the language codes,
    then ``<mask>`` last. Source/target language codes are injected as
    prefix/suffix special tokens around each encoded sequence.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # legacy_behaviour=True keeps the old (pre-fix) token layout: language
        # code appended after </s> instead of prefixed.
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        # <mask> goes last, after all pieces and language codes.
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        """Make the tokenizer picklable: the SentencePiece processor itself is
        not picklable, so serialize its model proto instead."""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap a sequence (or pair) with the current language prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token types; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline: encode inputs and force the target language as BOS."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens for the source language.

        Default: [lang_code] ... </s>; legacy: ... </s> [lang_code].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens for the target language (same layout as source)."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 70 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random benchmark case: 10 ints in [-1000, 1000] plus a target in [-5000, 5000]."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)
_lowerCAmelCase : List[Any] = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: scan every 3-permutation of *arr*.

    Returns the sorted triplet whose sum equals *target*, or (0, 0, 0) if none exists.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer search on the sorted array, O(n^2).

    Note: sorts *arr* in place. Returns (0, 0, 0) when no triplet sums to *target*.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both implementations on the shared dataset.

    Runs 5 repeats of 10000 calls each and returns the best (minimum) time
    for (naive, optimized) — the standard timeit practice.
    """
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code_1 = "\ntriplet_sum1(*dataset)\n"
    test_code_2 = "\ntriplet_sum2(*dataset)\n"
    times_1 = repeat(setup=setup_code, stmt=test_code_1, repeat=5, number=10000)
    times_2 = repeat(setup=setup_code, stmt=test_code_2, repeat=5, number=10000)
    return (min(times_1), min(times_2))
if __name__ == "__main__":
    # Run the module's doctests first, then report the benchmark results.
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.